Diffstat (limited to 'drivers/net/wireless/ath9k/recv.c')
-rw-r--r-- | drivers/net/wireless/ath9k/recv.c | 1242
1 file changed, 288 insertions, 954 deletions
diff --git a/drivers/net/wireless/ath9k/recv.c b/drivers/net/wireless/ath9k/recv.c
index 504a0444d89f..462e08c3d09d 100644
--- a/drivers/net/wireless/ath9k/recv.c
+++ b/drivers/net/wireless/ath9k/recv.c
@@ -14,10 +14,6 @@
14 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | 14 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | /* | ||
18 | * Implementation of receive path. | ||
19 | */ | ||
20 | |||
21 | #include "core.h" | 17 | #include "core.h" |
22 | 18 | ||
23 | /* | 19 | /* |
@@ -27,10 +23,7 @@
27 | * MAC acknowledges BA status as long as it copies frames to host | 23 | * MAC acknowledges BA status as long as it copies frames to host |
28 | * buffer (or rx fifo). This can incorrectly acknowledge packets | 24 | * buffer (or rx fifo). This can incorrectly acknowledge packets |
29 | * to a sender if last desc is self-linked. | 25 | * to a sender if last desc is self-linked. |
30 | * | ||
31 | * NOTE: Caller should hold the rxbuf lock. | ||
32 | */ | 26 | */ |
33 | |||
34 | static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf) | 27 | static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf) |
35 | { | 28 | { |
36 | struct ath_hal *ah = sc->sc_ah; | 29 | struct ath_hal *ah = sc->sc_ah; |
@@ -40,356 +33,53 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
40 | ATH_RXBUF_RESET(bf); | 33 | ATH_RXBUF_RESET(bf); |
41 | 34 | ||
42 | ds = bf->bf_desc; | 35 | ds = bf->bf_desc; |
43 | ds->ds_link = 0; /* link to null */ | 36 | ds->ds_link = 0; /* link to null */ |
44 | ds->ds_data = bf->bf_buf_addr; | 37 | ds->ds_data = bf->bf_buf_addr; |
45 | 38 | ||
46 | /* XXX For RADAR? | 39 | /* virtual addr of the beginning of the buffer. */ |
47 | * virtual addr of the beginning of the buffer. */ | ||
48 | skb = bf->bf_mpdu; | 40 | skb = bf->bf_mpdu; |
49 | ASSERT(skb != NULL); | 41 | ASSERT(skb != NULL); |
50 | ds->ds_vdata = skb->data; | 42 | ds->ds_vdata = skb->data; |
51 | 43 | ||
52 | /* setup rx descriptors. The sc_rxbufsize here tells the harware | 44 | /* setup rx descriptors. The rx.bufsize here tells the harware |
53 | * how much data it can DMA to us and that we are prepared | 45 | * how much data it can DMA to us and that we are prepared |
54 | * to process */ | 46 | * to process */ |
55 | ath9k_hw_setuprxdesc(ah, | 47 | ath9k_hw_setuprxdesc(ah, ds, |
56 | ds, | 48 | sc->rx.bufsize, |
57 | sc->sc_rxbufsize, | ||
58 | 0); | 49 | 0); |
59 | 50 | ||
60 | if (sc->sc_rxlink == NULL) | 51 | if (sc->rx.rxlink == NULL) |
61 | ath9k_hw_putrxbuf(ah, bf->bf_daddr); | 52 | ath9k_hw_putrxbuf(ah, bf->bf_daddr); |
62 | else | 53 | else |
63 | *sc->sc_rxlink = bf->bf_daddr; | 54 | *sc->rx.rxlink = bf->bf_daddr; |
64 | 55 | ||
65 | sc->sc_rxlink = &ds->ds_link; | 56 | sc->rx.rxlink = &ds->ds_link; |
66 | ath9k_hw_rxena(ah); | 57 | ath9k_hw_rxena(ah); |
67 | } | 58 | } |
68 | 59 | ||
69 | /* Process received BAR frame */ | 60 | static void ath_setdefantenna(struct ath_softc *sc, u32 antenna) |
70 | |||
71 | static int ath_bar_rx(struct ath_softc *sc, | ||
72 | struct ath_node *an, | ||
73 | struct sk_buff *skb) | ||
74 | { | ||
75 | struct ieee80211_bar *bar; | ||
76 | struct ath_arx_tid *rxtid; | ||
77 | struct sk_buff *tskb; | ||
78 | struct ath_recv_status *rx_status; | ||
79 | int tidno, index, cindex; | ||
80 | u16 seqno; | ||
81 | |||
82 | /* look at BAR contents */ | ||
83 | |||
84 | bar = (struct ieee80211_bar *)skb->data; | ||
85 | tidno = (le16_to_cpu(bar->control) & IEEE80211_BAR_CTL_TID_M) | ||
86 | >> IEEE80211_BAR_CTL_TID_S; | ||
87 | seqno = le16_to_cpu(bar->start_seq_num) >> IEEE80211_SEQ_SEQ_SHIFT; | ||
88 | |||
89 | /* process BAR - indicate all pending RX frames till the BAR seqno */ | ||
90 | |||
91 | rxtid = &an->an_aggr.rx.tid[tidno]; | ||
92 | |||
93 | spin_lock_bh(&rxtid->tidlock); | ||
94 | |||
95 | /* get relative index */ | ||
96 | |||
97 | index = ATH_BA_INDEX(rxtid->seq_next, seqno); | ||
98 | |||
99 | /* drop BAR if old sequence (index is too large) */ | ||
100 | |||
101 | if ((index > rxtid->baw_size) && | ||
102 | (index > (IEEE80211_SEQ_MAX - (rxtid->baw_size << 2)))) | ||
103 | /* discard frame, ieee layer may not treat frame as a dup */ | ||
104 | goto unlock_and_free; | ||
105 | |||
106 | /* complete receive processing for all pending frames upto BAR seqno */ | ||
107 | |||
108 | cindex = (rxtid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); | ||
109 | while ((rxtid->baw_head != rxtid->baw_tail) && | ||
110 | (rxtid->baw_head != cindex)) { | ||
111 | tskb = rxtid->rxbuf[rxtid->baw_head].rx_wbuf; | ||
112 | rx_status = &rxtid->rxbuf[rxtid->baw_head].rx_status; | ||
113 | rxtid->rxbuf[rxtid->baw_head].rx_wbuf = NULL; | ||
114 | |||
115 | if (tskb != NULL) | ||
116 | ath_rx_subframe(an, tskb, rx_status); | ||
117 | |||
118 | INCR(rxtid->baw_head, ATH_TID_MAX_BUFS); | ||
119 | INCR(rxtid->seq_next, IEEE80211_SEQ_MAX); | ||
120 | } | ||
121 | |||
122 | /* ... and indicate rest of the frames in-order */ | ||
123 | |||
124 | while (rxtid->baw_head != rxtid->baw_tail && | ||
125 | rxtid->rxbuf[rxtid->baw_head].rx_wbuf != NULL) { | ||
126 | tskb = rxtid->rxbuf[rxtid->baw_head].rx_wbuf; | ||
127 | rx_status = &rxtid->rxbuf[rxtid->baw_head].rx_status; | ||
128 | rxtid->rxbuf[rxtid->baw_head].rx_wbuf = NULL; | ||
129 | |||
130 | ath_rx_subframe(an, tskb, rx_status); | ||
131 | |||
132 | INCR(rxtid->baw_head, ATH_TID_MAX_BUFS); | ||
133 | INCR(rxtid->seq_next, IEEE80211_SEQ_MAX); | ||
134 | } | ||
135 | |||
136 | unlock_and_free: | ||
137 | spin_unlock_bh(&rxtid->tidlock); | ||
138 | /* free bar itself */ | ||
139 | dev_kfree_skb(skb); | ||
140 | return IEEE80211_FTYPE_CTL; | ||
141 | } | ||
142 | |||
143 | /* Function to handle a subframe of aggregation when HT is enabled */ | ||
144 | |||
145 | static int ath_ampdu_input(struct ath_softc *sc, | ||
146 | struct ath_node *an, | ||
147 | struct sk_buff *skb, | ||
148 | struct ath_recv_status *rx_status) | ||
149 | { | ||
150 | struct ieee80211_hdr *hdr; | ||
151 | struct ath_arx_tid *rxtid; | ||
152 | struct ath_rxbuf *rxbuf; | ||
153 | u8 type, subtype; | ||
154 | u16 rxseq; | ||
155 | int tid = 0, index, cindex, rxdiff; | ||
156 | __le16 fc; | ||
157 | u8 *qc; | ||
158 | |||
159 | hdr = (struct ieee80211_hdr *)skb->data; | ||
160 | fc = hdr->frame_control; | ||
161 | |||
162 | /* collect stats of frames with non-zero version */ | ||
163 | |||
164 | if ((le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_VERS) != 0) { | ||
165 | dev_kfree_skb(skb); | ||
166 | return -1; | ||
167 | } | ||
168 | |||
169 | type = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_FTYPE; | ||
170 | subtype = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_STYPE; | ||
171 | |||
172 | if (ieee80211_is_back_req(fc)) | ||
173 | return ath_bar_rx(sc, an, skb); | ||
174 | |||
175 | /* special aggregate processing only for qos unicast data frames */ | ||
176 | |||
177 | if (!ieee80211_is_data(fc) || | ||
178 | !ieee80211_is_data_qos(fc) || | ||
179 | is_multicast_ether_addr(hdr->addr1)) | ||
180 | return ath_rx_subframe(an, skb, rx_status); | ||
181 | |||
182 | /* lookup rx tid state */ | ||
183 | |||
184 | if (ieee80211_is_data_qos(fc)) { | ||
185 | qc = ieee80211_get_qos_ctl(hdr); | ||
186 | tid = qc[0] & 0xf; | ||
187 | } | ||
188 | |||
189 | if (sc->sc_ah->ah_opmode == ATH9K_M_STA) { | ||
190 | /* Drop the frame not belonging to me. */ | ||
191 | if (memcmp(hdr->addr1, sc->sc_myaddr, ETH_ALEN)) { | ||
192 | dev_kfree_skb(skb); | ||
193 | return -1; | ||
194 | } | ||
195 | } | ||
196 | |||
197 | rxtid = &an->an_aggr.rx.tid[tid]; | ||
198 | |||
199 | spin_lock(&rxtid->tidlock); | ||
200 | |||
201 | rxdiff = (rxtid->baw_tail - rxtid->baw_head) & | ||
202 | (ATH_TID_MAX_BUFS - 1); | ||
203 | |||
204 | /* | ||
205 | * If the ADDBA exchange has not been completed by the source, | ||
206 | * process via legacy path (i.e. no reordering buffer is needed) | ||
207 | */ | ||
208 | if (!rxtid->addba_exchangecomplete) { | ||
209 | spin_unlock(&rxtid->tidlock); | ||
210 | return ath_rx_subframe(an, skb, rx_status); | ||
211 | } | ||
212 | |||
213 | /* extract sequence number from recvd frame */ | ||
214 | |||
215 | rxseq = le16_to_cpu(hdr->seq_ctrl) >> IEEE80211_SEQ_SEQ_SHIFT; | ||
216 | |||
217 | if (rxtid->seq_reset) { | ||
218 | rxtid->seq_reset = 0; | ||
219 | rxtid->seq_next = rxseq; | ||
220 | } | ||
221 | |||
222 | index = ATH_BA_INDEX(rxtid->seq_next, rxseq); | ||
223 | |||
224 | /* drop frame if old sequence (index is too large) */ | ||
225 | |||
226 | if (index > (IEEE80211_SEQ_MAX - (rxtid->baw_size << 2))) { | ||
227 | /* discard frame, ieee layer may not treat frame as a dup */ | ||
228 | spin_unlock(&rxtid->tidlock); | ||
229 | dev_kfree_skb(skb); | ||
230 | return IEEE80211_FTYPE_DATA; | ||
231 | } | ||
232 | |||
233 | /* sequence number is beyond block-ack window */ | ||
234 | |||
235 | if (index >= rxtid->baw_size) { | ||
236 | |||
237 | /* complete receive processing for all pending frames */ | ||
238 | |||
239 | while (index >= rxtid->baw_size) { | ||
240 | |||
241 | rxbuf = rxtid->rxbuf + rxtid->baw_head; | ||
242 | |||
243 | if (rxbuf->rx_wbuf != NULL) { | ||
244 | ath_rx_subframe(an, rxbuf->rx_wbuf, | ||
245 | &rxbuf->rx_status); | ||
246 | rxbuf->rx_wbuf = NULL; | ||
247 | } | ||
248 | |||
249 | INCR(rxtid->baw_head, ATH_TID_MAX_BUFS); | ||
250 | INCR(rxtid->seq_next, IEEE80211_SEQ_MAX); | ||
251 | |||
252 | index--; | ||
253 | } | ||
254 | } | ||
255 | |||
256 | /* add buffer to the recv ba window */ | ||
257 | |||
258 | cindex = (rxtid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); | ||
259 | rxbuf = rxtid->rxbuf + cindex; | ||
260 | |||
261 | if (rxbuf->rx_wbuf != NULL) { | ||
262 | spin_unlock(&rxtid->tidlock); | ||
263 | /* duplicate frame */ | ||
264 | dev_kfree_skb(skb); | ||
265 | return IEEE80211_FTYPE_DATA; | ||
266 | } | ||
267 | |||
268 | rxbuf->rx_wbuf = skb; | ||
269 | rxbuf->rx_time = get_timestamp(); | ||
270 | rxbuf->rx_status = *rx_status; | ||
271 | |||
272 | /* advance tail if sequence received is newer | ||
273 | * than any received so far */ | ||
274 | |||
275 | if (index >= rxdiff) { | ||
276 | rxtid->baw_tail = cindex; | ||
277 | INCR(rxtid->baw_tail, ATH_TID_MAX_BUFS); | ||
278 | } | ||
279 | |||
280 | /* indicate all in-order received frames */ | ||
281 | |||
282 | while (rxtid->baw_head != rxtid->baw_tail) { | ||
283 | rxbuf = rxtid->rxbuf + rxtid->baw_head; | ||
284 | if (!rxbuf->rx_wbuf) | ||
285 | break; | ||
286 | |||
287 | ath_rx_subframe(an, rxbuf->rx_wbuf, &rxbuf->rx_status); | ||
288 | rxbuf->rx_wbuf = NULL; | ||
289 | |||
290 | INCR(rxtid->baw_head, ATH_TID_MAX_BUFS); | ||
291 | INCR(rxtid->seq_next, IEEE80211_SEQ_MAX); | ||
292 | } | ||
293 | |||
294 | /* | ||
295 | * start a timer to flush all received frames if there are pending | ||
296 | * receive frames | ||
297 | */ | ||
298 | if (rxtid->baw_head != rxtid->baw_tail) | ||
299 | mod_timer(&rxtid->timer, ATH_RX_TIMEOUT); | ||
300 | else | ||
301 | del_timer_sync(&rxtid->timer); | ||
302 | |||
303 | spin_unlock(&rxtid->tidlock); | ||
304 | return IEEE80211_FTYPE_DATA; | ||
305 | } | ||
306 | |||
307 | /* Timer to flush all received sub-frames */ | ||
308 | |||
309 | static void ath_rx_timer(unsigned long data) | ||
310 | { | 61 | { |
311 | struct ath_arx_tid *rxtid = (struct ath_arx_tid *)data; | 62 | /* XXX block beacon interrupts */ |
312 | struct ath_node *an = rxtid->an; | 63 | ath9k_hw_setantenna(sc->sc_ah, antenna); |
313 | struct ath_rxbuf *rxbuf; | 64 | sc->rx.defant = antenna; |
314 | int nosched; | 65 | sc->rx.rxotherant = 0; |
315 | |||
316 | spin_lock_bh(&rxtid->tidlock); | ||
317 | while (rxtid->baw_head != rxtid->baw_tail) { | ||
318 | rxbuf = rxtid->rxbuf + rxtid->baw_head; | ||
319 | if (!rxbuf->rx_wbuf) { | ||
320 | INCR(rxtid->baw_head, ATH_TID_MAX_BUFS); | ||
321 | INCR(rxtid->seq_next, IEEE80211_SEQ_MAX); | ||
322 | continue; | ||
323 | } | ||
324 | |||
325 | /* | ||
326 | * Stop if the next one is a very recent frame. | ||
327 | * | ||
328 | * Call get_timestamp in every iteration to protect against the | ||
329 | * case in which a new frame is received while we are executing | ||
330 | * this function. Using a timestamp obtained before entering | ||
331 | * the loop could lead to a very large time interval | ||
332 | * (a negative value typecast to unsigned), breaking the | ||
333 | * function's logic. | ||
334 | */ | ||
335 | if ((get_timestamp() - rxbuf->rx_time) < | ||
336 | (ATH_RX_TIMEOUT * HZ / 1000)) | ||
337 | break; | ||
338 | |||
339 | ath_rx_subframe(an, rxbuf->rx_wbuf, | ||
340 | &rxbuf->rx_status); | ||
341 | rxbuf->rx_wbuf = NULL; | ||
342 | |||
343 | INCR(rxtid->baw_head, ATH_TID_MAX_BUFS); | ||
344 | INCR(rxtid->seq_next, IEEE80211_SEQ_MAX); | ||
345 | } | ||
346 | |||
347 | /* | ||
348 | * start a timer to flush all received frames if there are pending | ||
349 | * receive frames | ||
350 | */ | ||
351 | if (rxtid->baw_head != rxtid->baw_tail) | ||
352 | nosched = 0; | ||
353 | else | ||
354 | nosched = 1; /* no need to re-arm the timer again */ | ||
355 | |||
356 | spin_unlock_bh(&rxtid->tidlock); | ||
357 | } | 66 | } |
358 | 67 | ||
359 | /* Free all pending sub-frames in the re-ordering buffer */ | 68 | /* |
360 | 69 | * Extend 15-bit time stamp from rx descriptor to | |
361 | static void ath_rx_flush_tid(struct ath_softc *sc, | 70 | * a full 64-bit TSF using the current h/w TSF. |
362 | struct ath_arx_tid *rxtid, int drop) | 71 | */ |
72 | static u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp) | ||
363 | { | 73 | { |
364 | struct ath_rxbuf *rxbuf; | 74 | u64 tsf; |
365 | unsigned long flag; | ||
366 | |||
367 | spin_lock_irqsave(&rxtid->tidlock, flag); | ||
368 | while (rxtid->baw_head != rxtid->baw_tail) { | ||
369 | rxbuf = rxtid->rxbuf + rxtid->baw_head; | ||
370 | if (!rxbuf->rx_wbuf) { | ||
371 | INCR(rxtid->baw_head, ATH_TID_MAX_BUFS); | ||
372 | INCR(rxtid->seq_next, IEEE80211_SEQ_MAX); | ||
373 | continue; | ||
374 | } | ||
375 | |||
376 | if (drop) | ||
377 | dev_kfree_skb(rxbuf->rx_wbuf); | ||
378 | else | ||
379 | ath_rx_subframe(rxtid->an, | ||
380 | rxbuf->rx_wbuf, | ||
381 | &rxbuf->rx_status); | ||
382 | |||
383 | rxbuf->rx_wbuf = NULL; | ||
384 | 75 | ||
385 | INCR(rxtid->baw_head, ATH_TID_MAX_BUFS); | 76 | tsf = ath9k_hw_gettsf64(sc->sc_ah); |
386 | INCR(rxtid->seq_next, IEEE80211_SEQ_MAX); | 77 | if ((tsf & 0x7fff) < rstamp) |
387 | } | 78 | tsf -= 0x8000; |
388 | spin_unlock_irqrestore(&rxtid->tidlock, flag); | 79 | return (tsf & ~0x7fff) | rstamp; |
389 | } | 80 | } |
390 | 81 | ||
391 | static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc, | 82 | static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc, u32 len) |
392 | u32 len) | ||
393 | { | 83 | { |
394 | struct sk_buff *skb; | 84 | struct sk_buff *skb; |
395 | u32 off; | 85 | u32 off; |
@@ -414,67 +104,131 @@ static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc,
414 | skb_reserve(skb, sc->sc_cachelsz - off); | 104 | skb_reserve(skb, sc->sc_cachelsz - off); |
415 | } else { | 105 | } else { |
416 | DPRINTF(sc, ATH_DBG_FATAL, | 106 | DPRINTF(sc, ATH_DBG_FATAL, |
417 | "%s: skbuff alloc of size %u failed\n", | 107 | "skbuff alloc of size %u failed\n", len); |
418 | __func__, len); | ||
419 | return NULL; | 108 | return NULL; |
420 | } | 109 | } |
421 | 110 | ||
422 | return skb; | 111 | return skb; |
423 | } | 112 | } |
424 | 113 | ||
425 | static void ath_rx_requeue(struct ath_softc *sc, struct sk_buff *skb) | 114 | /* |
115 | * For Decrypt or Demic errors, we only mark packet status here and always push | ||
116 | * up the frame up to let mac80211 handle the actual error case, be it no | ||
117 | * decryption key or real decryption error. This let us keep statistics there. | ||
118 | */ | ||
119 | static int ath_rx_prepare(struct sk_buff *skb, struct ath_desc *ds, | ||
120 | struct ieee80211_rx_status *rx_status, bool *decrypt_error, | ||
121 | struct ath_softc *sc) | ||
426 | { | 122 | { |
427 | struct ath_buf *bf = ATH_RX_CONTEXT(skb)->ctx_rxbuf; | 123 | struct ieee80211_hdr *hdr; |
124 | u8 ratecode; | ||
125 | __le16 fc; | ||
428 | 126 | ||
429 | ASSERT(bf != NULL); | 127 | hdr = (struct ieee80211_hdr *)skb->data; |
128 | fc = hdr->frame_control; | ||
129 | memset(rx_status, 0, sizeof(struct ieee80211_rx_status)); | ||
430 | 130 | ||
431 | spin_lock_bh(&sc->sc_rxbuflock); | 131 | if (ds->ds_rxstat.rs_more) { |
432 | if (bf->bf_status & ATH_BUFSTATUS_STALE) { | ||
433 | /* | 132 | /* |
434 | * This buffer is still held for hw acess. | 133 | * Frame spans multiple descriptors; this cannot happen yet |
435 | * Mark it as free to be re-queued it later. | 134 | * as we don't support jumbograms. If not in monitor mode, |
135 | * discard the frame. Enable this if you want to see | ||
136 | * error frames in Monitor mode. | ||
436 | */ | 137 | */ |
437 | bf->bf_status |= ATH_BUFSTATUS_FREE; | 138 | if (sc->sc_ah->ah_opmode != NL80211_IFTYPE_MONITOR) |
438 | } else { | 139 | goto rx_next; |
439 | /* XXX: we probably never enter here, remove after | 140 | } else if (ds->ds_rxstat.rs_status != 0) { |
440 | * verification */ | 141 | if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC) |
441 | list_add_tail(&bf->list, &sc->sc_rxbuf); | 142 | rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; |
442 | ath_rx_buf_link(sc, bf); | 143 | if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY) |
144 | goto rx_next; | ||
145 | |||
146 | if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT) { | ||
147 | *decrypt_error = true; | ||
148 | } else if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC) { | ||
149 | if (ieee80211_is_ctl(fc)) | ||
150 | /* | ||
151 | * Sometimes, we get invalid | ||
152 | * MIC failures on valid control frames. | ||
153 | * Remove these mic errors. | ||
154 | */ | ||
155 | ds->ds_rxstat.rs_status &= ~ATH9K_RXERR_MIC; | ||
156 | else | ||
157 | rx_status->flag |= RX_FLAG_MMIC_ERROR; | ||
158 | } | ||
159 | /* | ||
160 | * Reject error frames with the exception of | ||
161 | * decryption and MIC failures. For monitor mode, | ||
162 | * we also ignore the CRC error. | ||
163 | */ | ||
164 | if (sc->sc_ah->ah_opmode == NL80211_IFTYPE_MONITOR) { | ||
165 | if (ds->ds_rxstat.rs_status & | ||
166 | ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC | | ||
167 | ATH9K_RXERR_CRC)) | ||
168 | goto rx_next; | ||
169 | } else { | ||
170 | if (ds->ds_rxstat.rs_status & | ||
171 | ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) { | ||
172 | goto rx_next; | ||
173 | } | ||
174 | } | ||
443 | } | 175 | } |
444 | spin_unlock_bh(&sc->sc_rxbuflock); | ||
445 | } | ||
446 | 176 | ||
447 | /* | 177 | ratecode = ds->ds_rxstat.rs_rate; |
448 | * The skb indicated to upper stack won't be returned to us. | 178 | |
449 | * So we have to allocate a new one and queue it by ourselves. | 179 | if (ratecode & 0x80) { |
450 | */ | 180 | /* HT rate */ |
451 | static int ath_rx_indicate(struct ath_softc *sc, | 181 | rx_status->flag |= RX_FLAG_HT; |
452 | struct sk_buff *skb, | 182 | if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040) |
453 | struct ath_recv_status *status, | 183 | rx_status->flag |= RX_FLAG_40MHZ; |
454 | u16 keyix) | 184 | if (ds->ds_rxstat.rs_flags & ATH9K_RX_GI) |
455 | { | 185 | rx_status->flag |= RX_FLAG_SHORT_GI; |
456 | struct ath_buf *bf = ATH_RX_CONTEXT(skb)->ctx_rxbuf; | 186 | rx_status->rate_idx = ratecode & 0x7f; |
457 | struct sk_buff *nskb; | 187 | } else { |
458 | int type; | 188 | int i = 0, cur_band, n_rates; |
459 | 189 | struct ieee80211_hw *hw = sc->hw; | |
460 | /* indicate frame to the stack, which will free the old skb. */ | ||
461 | type = _ath_rx_indicate(sc, skb, status, keyix); | ||
462 | |||
463 | /* allocate a new skb and queue it to for H/W processing */ | ||
464 | nskb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize); | ||
465 | if (nskb != NULL) { | ||
466 | bf->bf_mpdu = nskb; | ||
467 | bf->bf_buf_addr = pci_map_single(sc->pdev, nskb->data, | ||
468 | sc->sc_rxbufsize, | ||
469 | PCI_DMA_FROMDEVICE); | ||
470 | bf->bf_dmacontext = bf->bf_buf_addr; | ||
471 | ATH_RX_CONTEXT(nskb)->ctx_rxbuf = bf; | ||
472 | 190 | ||
473 | /* queue the new wbuf to H/W */ | 191 | cur_band = hw->conf.channel->band; |
474 | ath_rx_requeue(sc, nskb); | 192 | n_rates = sc->sbands[cur_band].n_bitrates; |
193 | |||
194 | for (i = 0; i < n_rates; i++) { | ||
195 | if (sc->sbands[cur_band].bitrates[i].hw_value == | ||
196 | ratecode) { | ||
197 | rx_status->rate_idx = i; | ||
198 | break; | ||
199 | } | ||
200 | |||
201 | if (sc->sbands[cur_band].bitrates[i].hw_value_short == | ||
202 | ratecode) { | ||
203 | rx_status->rate_idx = i; | ||
204 | rx_status->flag |= RX_FLAG_SHORTPRE; | ||
205 | break; | ||
206 | } | ||
207 | } | ||
475 | } | 208 | } |
476 | 209 | ||
477 | return type; | 210 | rx_status->mactime = ath_extend_tsf(sc, ds->ds_rxstat.rs_tstamp); |
211 | rx_status->band = sc->hw->conf.channel->band; | ||
212 | rx_status->freq = sc->hw->conf.channel->center_freq; | ||
213 | rx_status->noise = sc->sc_ani.sc_noise_floor; | ||
214 | rx_status->signal = rx_status->noise + ds->ds_rxstat.rs_rssi; | ||
215 | rx_status->antenna = ds->ds_rxstat.rs_antenna; | ||
216 | |||
217 | /* at 45 you will be able to use MCS 15 reliably. A more elaborate | ||
218 | * scheme can be used here but it requires tables of SNR/throughput for | ||
219 | * each possible mode used. */ | ||
220 | rx_status->qual = ds->ds_rxstat.rs_rssi * 100 / 45; | ||
221 | |||
222 | /* rssi can be more than 45 though, anything above that | ||
223 | * should be considered at 100% */ | ||
224 | if (rx_status->qual > 100) | ||
225 | rx_status->qual = 100; | ||
226 | |||
227 | rx_status->flag |= RX_FLAG_TSFT; | ||
228 | |||
229 | return 1; | ||
230 | rx_next: | ||
231 | return 0; | ||
478 | } | 232 | } |
479 | 233 | ||
480 | static void ath_opmode_init(struct ath_softc *sc) | 234 | static void ath_opmode_init(struct ath_softc *sc) |
@@ -498,11 +252,7 @@ static void ath_opmode_init(struct ath_softc *sc)
498 | 252 | ||
499 | /* calculate and install multicast filter */ | 253 | /* calculate and install multicast filter */ |
500 | mfilt[0] = mfilt[1] = ~0; | 254 | mfilt[0] = mfilt[1] = ~0; |
501 | |||
502 | ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]); | 255 | ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]); |
503 | DPRINTF(sc, ATH_DBG_CONFIG , | ||
504 | "%s: RX filter 0x%x, MC filter %08x:%08x\n", | ||
505 | __func__, rfilt, mfilt[0], mfilt[1]); | ||
506 | } | 256 | } |
507 | 257 | ||
508 | int ath_rx_init(struct ath_softc *sc, int nbufs) | 258 | int ath_rx_init(struct ath_softc *sc, int nbufs) |
@@ -512,38 +262,29 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
512 | int error = 0; | 262 | int error = 0; |
513 | 263 | ||
514 | do { | 264 | do { |
515 | spin_lock_init(&sc->sc_rxflushlock); | 265 | spin_lock_init(&sc->rx.rxflushlock); |
516 | sc->sc_flags &= ~SC_OP_RXFLUSH; | 266 | sc->sc_flags &= ~SC_OP_RXFLUSH; |
517 | spin_lock_init(&sc->sc_rxbuflock); | 267 | spin_lock_init(&sc->rx.rxbuflock); |
518 | 268 | ||
519 | /* | 269 | sc->rx.bufsize = roundup(IEEE80211_MAX_MPDU_LEN, |
520 | * Cisco's VPN software requires that drivers be able to | ||
521 | * receive encapsulated frames that are larger than the MTU. | ||
522 | * Since we can't be sure how large a frame we'll get, setup | ||
523 | * to handle the larges on possible. | ||
524 | */ | ||
525 | sc->sc_rxbufsize = roundup(IEEE80211_MAX_MPDU_LEN, | ||
526 | min(sc->sc_cachelsz, | 270 | min(sc->sc_cachelsz, |
527 | (u16)64)); | 271 | (u16)64)); |
528 | 272 | ||
529 | DPRINTF(sc, ATH_DBG_CONFIG, "%s: cachelsz %u rxbufsize %u\n", | 273 | DPRINTF(sc, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n", |
530 | __func__, sc->sc_cachelsz, sc->sc_rxbufsize); | 274 | sc->sc_cachelsz, sc->rx.bufsize); |
531 | 275 | ||
532 | /* Initialize rx descriptors */ | 276 | /* Initialize rx descriptors */ |
533 | 277 | ||
534 | error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf, | 278 | error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf, |
535 | "rx", nbufs, 1); | 279 | "rx", nbufs, 1); |
536 | if (error != 0) { | 280 | if (error != 0) { |
537 | DPRINTF(sc, ATH_DBG_FATAL, | 281 | DPRINTF(sc, ATH_DBG_FATAL, |
538 | "%s: failed to allocate rx descriptors: %d\n", | 282 | "failed to allocate rx descriptors: %d\n", error); |
539 | __func__, error); | ||
540 | break; | 283 | break; |
541 | } | 284 | } |
542 | 285 | ||
543 | /* Pre-allocate a wbuf for each rx buffer */ | 286 | list_for_each_entry(bf, &sc->rx.rxbuf, list) { |
544 | 287 | skb = ath_rxbuf_alloc(sc, sc->rx.bufsize); | |
545 | list_for_each_entry(bf, &sc->sc_rxbuf, list) { | ||
546 | skb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize); | ||
547 | if (skb == NULL) { | 288 | if (skb == NULL) { |
548 | error = -ENOMEM; | 289 | error = -ENOMEM; |
549 | break; | 290 | break; |
@@ -551,12 +292,20 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
551 | 292 | ||
552 | bf->bf_mpdu = skb; | 293 | bf->bf_mpdu = skb; |
553 | bf->bf_buf_addr = pci_map_single(sc->pdev, skb->data, | 294 | bf->bf_buf_addr = pci_map_single(sc->pdev, skb->data, |
554 | sc->sc_rxbufsize, | 295 | sc->rx.bufsize, |
555 | PCI_DMA_FROMDEVICE); | 296 | PCI_DMA_FROMDEVICE); |
297 | if (unlikely(pci_dma_mapping_error(sc->pdev, | ||
298 | bf->bf_buf_addr))) { | ||
299 | dev_kfree_skb_any(skb); | ||
300 | bf->bf_mpdu = NULL; | ||
301 | DPRINTF(sc, ATH_DBG_CONFIG, | ||
302 | "pci_dma_mapping_error() on RX init\n"); | ||
303 | error = -ENOMEM; | ||
304 | break; | ||
305 | } | ||
556 | bf->bf_dmacontext = bf->bf_buf_addr; | 306 | bf->bf_dmacontext = bf->bf_buf_addr; |
557 | ATH_RX_CONTEXT(skb)->ctx_rxbuf = bf; | ||
558 | } | 307 | } |
559 | sc->sc_rxlink = NULL; | 308 | sc->rx.rxlink = NULL; |
560 | 309 | ||
561 | } while (0); | 310 | } while (0); |
562 | 311 | ||
@@ -566,23 +315,19 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
566 | return error; | 315 | return error; |
567 | } | 316 | } |
568 | 317 | ||
569 | /* Reclaim all rx queue resources */ | ||
570 | |||
571 | void ath_rx_cleanup(struct ath_softc *sc) | 318 | void ath_rx_cleanup(struct ath_softc *sc) |
572 | { | 319 | { |
573 | struct sk_buff *skb; | 320 | struct sk_buff *skb; |
574 | struct ath_buf *bf; | 321 | struct ath_buf *bf; |
575 | 322 | ||
576 | list_for_each_entry(bf, &sc->sc_rxbuf, list) { | 323 | list_for_each_entry(bf, &sc->rx.rxbuf, list) { |
577 | skb = bf->bf_mpdu; | 324 | skb = bf->bf_mpdu; |
578 | if (skb) | 325 | if (skb) |
579 | dev_kfree_skb(skb); | 326 | dev_kfree_skb(skb); |
580 | } | 327 | } |
581 | 328 | ||
582 | /* cleanup rx descriptors */ | 329 | if (sc->rx.rxdma.dd_desc_len != 0) |
583 | 330 | ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf); | |
584 | if (sc->sc_rxdma.dd_desc_len != 0) | ||
585 | ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); | ||
586 | } | 331 | } |
587 | 332 | ||
588 | /* | 333 | /* |
@@ -615,201 +360,115 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
615 | | ATH9K_RX_FILTER_MCAST; | 360 | | ATH9K_RX_FILTER_MCAST; |
616 | 361 | ||
617 | /* If not a STA, enable processing of Probe Requests */ | 362 | /* If not a STA, enable processing of Probe Requests */ |
618 | if (sc->sc_ah->ah_opmode != ATH9K_M_STA) | 363 | if (sc->sc_ah->ah_opmode != NL80211_IFTYPE_STATION) |
619 | rfilt |= ATH9K_RX_FILTER_PROBEREQ; | 364 | rfilt |= ATH9K_RX_FILTER_PROBEREQ; |
620 | 365 | ||
621 | /* Can't set HOSTAP into promiscous mode */ | 366 | /* Can't set HOSTAP into promiscous mode */ |
622 | if (((sc->sc_ah->ah_opmode != ATH9K_M_HOSTAP) && | 367 | if (((sc->sc_ah->ah_opmode != NL80211_IFTYPE_AP) && |
623 | (sc->rx_filter & FIF_PROMISC_IN_BSS)) || | 368 | (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) || |
624 | (sc->sc_ah->ah_opmode == ATH9K_M_MONITOR)) { | 369 | (sc->sc_ah->ah_opmode == NL80211_IFTYPE_MONITOR)) { |
625 | rfilt |= ATH9K_RX_FILTER_PROM; | 370 | rfilt |= ATH9K_RX_FILTER_PROM; |
626 | /* ??? To prevent from sending ACK */ | 371 | /* ??? To prevent from sending ACK */ |
627 | rfilt &= ~ATH9K_RX_FILTER_UCAST; | 372 | rfilt &= ~ATH9K_RX_FILTER_UCAST; |
628 | } | 373 | } |
629 | 374 | ||
630 | if (((sc->sc_ah->ah_opmode == ATH9K_M_STA) && | 375 | if (sc->sc_ah->ah_opmode == NL80211_IFTYPE_STATION || |
631 | (sc->rx_filter & FIF_BCN_PRBRESP_PROMISC)) || | 376 | sc->sc_ah->ah_opmode == NL80211_IFTYPE_ADHOC) |
632 | (sc->sc_ah->ah_opmode == ATH9K_M_IBSS)) | ||
633 | rfilt |= ATH9K_RX_FILTER_BEACON; | 377 | rfilt |= ATH9K_RX_FILTER_BEACON; |
634 | 378 | ||
635 | /* If in HOSTAP mode, want to enable reception of PSPOLL frames | 379 | /* If in HOSTAP mode, want to enable reception of PSPOLL frames |
636 | & beacon frames */ | 380 | & beacon frames */ |
637 | if (sc->sc_ah->ah_opmode == ATH9K_M_HOSTAP) | 381 | if (sc->sc_ah->ah_opmode == NL80211_IFTYPE_AP) |
638 | rfilt |= (ATH9K_RX_FILTER_BEACON | ATH9K_RX_FILTER_PSPOLL); | 382 | rfilt |= (ATH9K_RX_FILTER_BEACON | ATH9K_RX_FILTER_PSPOLL); |
383 | |||
639 | return rfilt; | 384 | return rfilt; |
640 | 385 | ||
641 | #undef RX_FILTER_PRESERVE | 386 | #undef RX_FILTER_PRESERVE |
642 | } | 387 | } |
643 | 388 | ||
644 | /* Enable the receive h/w following a reset. */ | ||
645 | |||
646 | int ath_startrecv(struct ath_softc *sc) | 389 | int ath_startrecv(struct ath_softc *sc) |
647 | { | 390 | { |
648 | struct ath_hal *ah = sc->sc_ah; | 391 | struct ath_hal *ah = sc->sc_ah; |
649 | struct ath_buf *bf, *tbf; | 392 | struct ath_buf *bf, *tbf; |
650 | 393 | ||
651 | spin_lock_bh(&sc->sc_rxbuflock); | 394 | spin_lock_bh(&sc->rx.rxbuflock); |
652 | if (list_empty(&sc->sc_rxbuf)) | 395 | if (list_empty(&sc->rx.rxbuf)) |
653 | goto start_recv; | 396 | goto start_recv; |
654 | 397 | ||
655 | sc->sc_rxlink = NULL; | 398 | sc->rx.rxlink = NULL; |
656 | list_for_each_entry_safe(bf, tbf, &sc->sc_rxbuf, list) { | 399 | list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) { |
657 | if (bf->bf_status & ATH_BUFSTATUS_STALE) { | ||
658 | /* restarting h/w, no need for holding descriptors */ | ||
659 | bf->bf_status &= ~ATH_BUFSTATUS_STALE; | ||
660 | /* | ||
661 | * Upper layer may not be done with the frame yet so | ||
662 | * we can't just re-queue it to hardware. Remove it | ||
663 | * from h/w queue. It'll be re-queued when upper layer | ||
664 | * returns the frame and ath_rx_requeue_mpdu is called. | ||
665 | */ | ||
666 | if (!(bf->bf_status & ATH_BUFSTATUS_FREE)) { | ||
667 | list_del(&bf->list); | ||
668 | continue; | ||
669 | } | ||
670 | } | ||
671 | /* chain descriptors */ | ||
672 | ath_rx_buf_link(sc, bf); | 400 | ath_rx_buf_link(sc, bf); |
673 | } | 401 | } |
674 | 402 | ||
675 | /* We could have deleted elements so the list may be empty now */ | 403 | /* We could have deleted elements so the list may be empty now */ |
676 | if (list_empty(&sc->sc_rxbuf)) | 404 | if (list_empty(&sc->rx.rxbuf)) |
677 | goto start_recv; | 405 | goto start_recv; |
678 | 406 | ||
679 | bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list); | 407 | bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list); |
680 | ath9k_hw_putrxbuf(ah, bf->bf_daddr); | 408 | ath9k_hw_putrxbuf(ah, bf->bf_daddr); |
681 | ath9k_hw_rxena(ah); /* enable recv descriptors */ | 409 | ath9k_hw_rxena(ah); |
682 | 410 | ||
683 | start_recv: | 411 | start_recv: |
684 | spin_unlock_bh(&sc->sc_rxbuflock); | 412 | spin_unlock_bh(&sc->rx.rxbuflock); |
685 | ath_opmode_init(sc); /* set filters, etc. */ | 413 | ath_opmode_init(sc); |
686 | ath9k_hw_startpcureceive(ah); /* re-enable PCU/DMA engine */ | 414 | ath9k_hw_startpcureceive(ah); |
415 | |||
687 | return 0; | 416 | return 0; |
688 | } | 417 | } |
689 | 418 | ||
690 | /* Disable the receive h/w in preparation for a reset. */ | ||
691 | |||
692 | bool ath_stoprecv(struct ath_softc *sc) | 419 | bool ath_stoprecv(struct ath_softc *sc) |
693 | { | 420 | { |
694 | struct ath_hal *ah = sc->sc_ah; | 421 | struct ath_hal *ah = sc->sc_ah; |
695 | u64 tsf; | ||
696 | bool stopped; | 422 | bool stopped; |
697 | 423 | ||
698 | ath9k_hw_stoppcurecv(ah); /* disable PCU */ | 424 | ath9k_hw_stoppcurecv(ah); |
699 | ath9k_hw_setrxfilter(ah, 0); /* clear recv filter */ | 425 | ath9k_hw_setrxfilter(ah, 0); |
700 | stopped = ath9k_hw_stopdmarecv(ah); /* disable DMA engine */ | 426 | stopped = ath9k_hw_stopdmarecv(ah); |
701 | mdelay(3); /* 3ms is long enough for 1 frame */ | 427 | mdelay(3); /* 3ms is long enough for 1 frame */ |
702 | tsf = ath9k_hw_gettsf64(ah); | 428 | sc->rx.rxlink = NULL; |
703 | sc->sc_rxlink = NULL; /* just in case */ | 429 | |
704 | return stopped; | 430 | return stopped; |
705 | } | 431 | } |
706 | 432 | ||
707 | /* Flush receive queue */ | ||
708 | |||
709 | void ath_flushrecv(struct ath_softc *sc) | 433 | void ath_flushrecv(struct ath_softc *sc) |
710 | { | 434 | { |
711 | /* | 435 | spin_lock_bh(&sc->rx.rxflushlock); |
712 | * ath_rx_tasklet may be used to handle rx interrupt and flush receive | ||
713 | * queue at the same time. Use a lock to serialize the access of rx | ||
714 | * queue. | ||
715 | * ath_rx_tasklet cannot hold the spinlock while indicating packets. | ||
716 | * Instead, do not claim the spinlock but check for a flush in | ||
717 | * progress (see references to sc_rxflush) | ||
718 | */ | ||
719 | spin_lock_bh(&sc->sc_rxflushlock); | ||
720 | sc->sc_flags |= SC_OP_RXFLUSH; | 436 | sc->sc_flags |= SC_OP_RXFLUSH; |
721 | |||
722 | ath_rx_tasklet(sc, 1); | 437 | ath_rx_tasklet(sc, 1); |
723 | |||
724 | sc->sc_flags &= ~SC_OP_RXFLUSH; | 438 | sc->sc_flags &= ~SC_OP_RXFLUSH; |
725 | spin_unlock_bh(&sc->sc_rxflushlock); | 439 | spin_unlock_bh(&sc->rx.rxflushlock); |
726 | } | 440 | } |
727 | 441 | ||
728 | /* Process an individual frame */ | ||
729 | |||
730 | int ath_rx_input(struct ath_softc *sc, | ||
731 | struct ath_node *an, | ||
732 | int is_ampdu, | ||
733 | struct sk_buff *skb, | ||
734 | struct ath_recv_status *rx_status, | ||
735 | enum ATH_RX_TYPE *status) | ||
736 | { | ||
737 | if (is_ampdu && (sc->sc_flags & SC_OP_RXAGGR)) { | ||
738 | *status = ATH_RX_CONSUMED; | ||
739 | return ath_ampdu_input(sc, an, skb, rx_status); | ||
740 | } else { | ||
741 | *status = ATH_RX_NON_CONSUMED; | ||
742 | return -1; | ||
743 | } | ||
744 | } | ||
745 | |||
746 | /* Process receive queue, as well as LED, etc. */ | ||
747 | |||
748 | int ath_rx_tasklet(struct ath_softc *sc, int flush) | 442 | int ath_rx_tasklet(struct ath_softc *sc, int flush) |
749 | { | 443 | { |
750 | #define PA2DESC(_sc, _pa) \ | 444 | #define PA2DESC(_sc, _pa) \ |
751 | ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \ | 445 | ((struct ath_desc *)((caddr_t)(_sc)->rx.rxdma.dd_desc + \ |
752 | ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr))) | 446 | ((_pa) - (_sc)->rx.rxdma.dd_desc_paddr))) |
753 | 447 | ||
754 | struct ath_buf *bf, *bf_held = NULL; | 448 | struct ath_buf *bf; |
755 | struct ath_desc *ds; | 449 | struct ath_desc *ds; |
756 | struct ieee80211_hdr *hdr; | 450 | struct sk_buff *skb = NULL, *requeue_skb; |
757 | struct sk_buff *skb = NULL; | 451 | struct ieee80211_rx_status rx_status; |
758 | struct ath_recv_status rx_status; | ||
759 | struct ath_hal *ah = sc->sc_ah; | 452 | struct ath_hal *ah = sc->sc_ah; |
760 | int type, rx_processed = 0; | 453 | struct ieee80211_hdr *hdr; |
761 | u32 phyerr; | 454 | int hdrlen, padsize, retval; |
762 | u8 chainreset = 0; | 455 | bool decrypt_error = false; |
763 | int retval; | 456 | u8 keyix; |
764 | __le16 fc; | 457 | |
458 | spin_lock_bh(&sc->rx.rxbuflock); | ||
765 | 459 | ||
766 | do { | 460 | do { |
767 | /* If handling rx interrupt and flush is in progress => exit */ | 461 | /* If handling rx interrupt and flush is in progress => exit */ |
768 | if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0)) | 462 | if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0)) |
769 | break; | 463 | break; |
770 | 464 | ||
771 | spin_lock_bh(&sc->sc_rxbuflock); | 465 | if (list_empty(&sc->rx.rxbuf)) { |
772 | if (list_empty(&sc->sc_rxbuf)) { | 466 | sc->rx.rxlink = NULL; |
773 | sc->sc_rxlink = NULL; | ||
774 | spin_unlock_bh(&sc->sc_rxbuflock); | ||
775 | break; | 467 | break; |
776 | } | 468 | } |
777 | 469 | ||
778 | bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list); | 470 | bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list); |
779 | |||
780 | /* | ||
781 | * There is a race condition that BH gets scheduled after sw | ||
782 | * writes RxE and before hw re-load the last descriptor to get | ||
783 | * the newly chained one. Software must keep the last DONE | ||
784 | * descriptor as a holding descriptor - software does so by | ||
785 | * marking it with the STALE flag. | ||
786 | */ | ||
787 | if (bf->bf_status & ATH_BUFSTATUS_STALE) { | ||
788 | bf_held = bf; | ||
789 | if (list_is_last(&bf_held->list, &sc->sc_rxbuf)) { | ||
790 | /* | ||
791 | * The holding descriptor is the last | ||
792 | * descriptor in queue. It's safe to | ||
793 | * remove the last holding descriptor | ||
794 | * in BH context. | ||
795 | */ | ||
796 | list_del(&bf_held->list); | ||
797 | bf_held->bf_status &= ~ATH_BUFSTATUS_STALE; | ||
798 | sc->sc_rxlink = NULL; | ||
799 | |||
800 | if (bf_held->bf_status & ATH_BUFSTATUS_FREE) { | ||
801 | list_add_tail(&bf_held->list, | ||
802 | &sc->sc_rxbuf); | ||
803 | ath_rx_buf_link(sc, bf_held); | ||
804 | } | ||
805 | spin_unlock_bh(&sc->sc_rxbuflock); | ||
806 | break; | ||
807 | } | ||
808 | bf = list_entry(bf->list.next, struct ath_buf, list); | ||
809 | } | ||
810 | |||
811 | ds = bf->bf_desc; | 471 | ds = bf->bf_desc; |
812 | ++rx_processed; | ||
813 | 472 | ||
814 | /* | 473 | /* |
815 | * Must provide the virtual address of the current | 474 | * Must provide the virtual address of the current |
@@ -822,8 +481,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
822 | * on. All this is necessary because of our use of | 481 | * on. All this is necessary because of our use of |
823 | * a self-linked list to avoid rx overruns. | 482 | * a self-linked list to avoid rx overruns. |
824 | */ | 483 | */ |
825 | retval = ath9k_hw_rxprocdesc(ah, | 484 | retval = ath9k_hw_rxprocdesc(ah, ds, |
826 | ds, | ||
827 | bf->bf_daddr, | 485 | bf->bf_daddr, |
828 | PA2DESC(sc, ds->ds_link), | 486 | PA2DESC(sc, ds->ds_link), |
829 | 0); | 487 | 0); |
@@ -831,8 +489,8 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
831 | struct ath_buf *tbf; | 489 | struct ath_buf *tbf; |
832 | struct ath_desc *tds; | 490 | struct ath_desc *tds; |
833 | 491 | ||
834 | if (list_is_last(&bf->list, &sc->sc_rxbuf)) { | 492 | if (list_is_last(&bf->list, &sc->rx.rxbuf)) { |
835 | spin_unlock_bh(&sc->sc_rxbuflock); | 493 | sc->rx.rxlink = NULL; |
836 | break; | 494 | break; |
837 | } | 495 | } |
838 | 496 | ||
@@ -850,451 +508,127 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
850 | */ | 508 | */ |
851 | 509 | ||
852 | tds = tbf->bf_desc; | 510 | tds = tbf->bf_desc; |
853 | retval = ath9k_hw_rxprocdesc(ah, | 511 | retval = ath9k_hw_rxprocdesc(ah, tds, tbf->bf_daddr, |
854 | tds, tbf->bf_daddr, | 512 | PA2DESC(sc, tds->ds_link), 0); |
855 | PA2DESC(sc, tds->ds_link), 0); | ||
856 | if (retval == -EINPROGRESS) { | 513 | if (retval == -EINPROGRESS) { |
857 | spin_unlock_bh(&sc->sc_rxbuflock); | ||
858 | break; | 514 | break; |
859 | } | 515 | } |
860 | } | 516 | } |
861 | 517 | ||
862 | /* XXX: we do not support frames spanning | ||
863 | * multiple descriptors */ | ||
864 | bf->bf_status |= ATH_BUFSTATUS_DONE; | ||
865 | |||
866 | skb = bf->bf_mpdu; | 518 | skb = bf->bf_mpdu; |
867 | if (skb == NULL) { /* XXX ??? can this happen */ | 519 | if (!skb) |
868 | spin_unlock_bh(&sc->sc_rxbuflock); | ||
869 | continue; | 520 | continue; |
870 | } | 521 | |
871 | /* | 522 | /* |
872 | * Now we know it's a completed frame, we can indicate the | 523 | * Synchronize the DMA transfer with CPU before |
873 | * frame. Remove the previous holding descriptor and leave | 524 | * 1. accessing the frame |
874 | * this one in the queue as the new holding descriptor. | 525 | * 2. requeueing the same buffer to h/w |
875 | */ | 526 | */ |
876 | if (bf_held) { | 527 | pci_dma_sync_single_for_cpu(sc->pdev, bf->bf_buf_addr, |
877 | list_del(&bf_held->list); | 528 | sc->rx.bufsize, |
878 | bf_held->bf_status &= ~ATH_BUFSTATUS_STALE; | 529 | PCI_DMA_FROMDEVICE); |
879 | if (bf_held->bf_status & ATH_BUFSTATUS_FREE) { | ||
880 | list_add_tail(&bf_held->list, &sc->sc_rxbuf); | ||
881 | /* try to requeue this descriptor */ | ||
882 | ath_rx_buf_link(sc, bf_held); | ||
883 | } | ||
884 | } | ||
885 | 530 | ||
886 | bf->bf_status |= ATH_BUFSTATUS_STALE; | ||
887 | bf_held = bf; | ||
888 | /* | 531 | /* |
889 | * Release the lock here in case ieee80211_input() return | 532 | * If we're asked to flush receive queue, directly |
890 | * the frame immediately by calling ath_rx_mpdu_requeue(). | 533 | * chain it back at the queue without processing it. |
891 | */ | 534 | */ |
892 | spin_unlock_bh(&sc->sc_rxbuflock); | 535 | if (flush) |
536 | goto requeue; | ||
893 | 537 | ||
894 | if (flush) { | 538 | if (!ds->ds_rxstat.rs_datalen) |
895 | /* | 539 | goto requeue; |
896 | * If we're asked to flush receive queue, directly | ||
897 | * chain it back at the queue without processing it. | ||
898 | */ | ||
899 | goto rx_next; | ||
900 | } | ||
901 | 540 | ||
902 | hdr = (struct ieee80211_hdr *)skb->data; | 541 | /* The status portion of the descriptor could get corrupted. */ |
903 | fc = hdr->frame_control; | 542 | if (sc->rx.bufsize < ds->ds_rxstat.rs_datalen) |
904 | memset(&rx_status, 0, sizeof(struct ath_recv_status)); | 543 | goto requeue; |
905 | 544 | ||
906 | if (ds->ds_rxstat.rs_more) { | 545 | if (!ath_rx_prepare(skb, ds, &rx_status, &decrypt_error, sc)) |
907 | /* | 546 | goto requeue; |
908 | * Frame spans multiple descriptors; this | 547 | |
909 | * cannot happen yet as we don't support | 548 | /* Ensure we always have an skb to requeue once we are done |
910 | * jumbograms. If not in monitor mode, | 549 | * processing the current buffer's skb */ |
911 | * discard the frame. | 550 | requeue_skb = ath_rxbuf_alloc(sc, sc->rx.bufsize); |
912 | */ | 551 | |
913 | #ifndef ERROR_FRAMES | 552 | /* If there is no memory we ignore the current RX'd frame, |
914 | /* | 553 | * tell hardware it can give us a new frame using the old |
915 | * Enable this if you want to see | 554 | * skb and put it at the tail of the sc->rx.rxbuf list for |
916 | * error frames in Monitor mode. | 555 | * processing. */ |
917 | */ | 556 | if (!requeue_skb) |
918 | if (sc->sc_ah->ah_opmode != ATH9K_M_MONITOR) | 557 | goto requeue; |
919 | goto rx_next; | 558 | |
920 | #endif | 559 | /* Unmap the frame */ |
921 | /* fall thru for monitor mode handling... */ | 560 | pci_unmap_single(sc->pdev, bf->bf_buf_addr, |
922 | } else if (ds->ds_rxstat.rs_status != 0) { | 561 | sc->rx.bufsize, |
923 | if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC) | 562 | PCI_DMA_FROMDEVICE); |
924 | rx_status.flags |= ATH_RX_FCS_ERROR; | ||
925 | if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY) { | ||
926 | phyerr = ds->ds_rxstat.rs_phyerr & 0x1f; | ||
927 | goto rx_next; | ||
928 | } | ||
929 | 563 | ||
930 | if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT) { | ||
931 | /* | ||
932 | * Decrypt error. We only mark packet status | ||
933 | * here and always push up the frame up to let | ||
934 | * mac80211 handle the actual error case, be | ||
935 | * it no decryption key or real decryption | ||
936 | * error. This let us keep statistics there. | ||
937 | */ | ||
938 | rx_status.flags |= ATH_RX_DECRYPT_ERROR; | ||
939 | } else if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC) { | ||
940 | /* | ||
941 | * Demic error. We only mark frame status here | ||
942 | * and always push up the frame up to let | ||
943 | * mac80211 handle the actual error case. This | ||
944 | * let us keep statistics there. Hardware may | ||
945 | * post a false-positive MIC error. | ||
946 | */ | ||
947 | if (ieee80211_is_ctl(fc)) | ||
948 | /* | ||
949 | * Sometimes, we get invalid | ||
950 | * MIC failures on valid control frames. | ||
951 | * Remove these mic errors. | ||
952 | */ | ||
953 | ds->ds_rxstat.rs_status &= | ||
954 | ~ATH9K_RXERR_MIC; | ||
955 | else | ||
956 | rx_status.flags |= ATH_RX_MIC_ERROR; | ||
957 | } | ||
958 | /* | ||
959 | * Reject error frames with the exception of | ||
960 | * decryption and MIC failures. For monitor mode, | ||
961 | * we also ignore the CRC error. | ||
962 | */ | ||
963 | if (sc->sc_ah->ah_opmode == ATH9K_M_MONITOR) { | ||
964 | if (ds->ds_rxstat.rs_status & | ||
965 | ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC | | ||
966 | ATH9K_RXERR_CRC)) | ||
967 | goto rx_next; | ||
968 | } else { | ||
969 | if (ds->ds_rxstat.rs_status & | ||
970 | ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) { | ||
971 | goto rx_next; | ||
972 | } | ||
973 | } | ||
974 | } | ||
975 | /* | ||
976 | * The status portion of the descriptor could get corrupted. | ||
977 | */ | ||
978 | if (sc->sc_rxbufsize < ds->ds_rxstat.rs_datalen) | ||
979 | goto rx_next; | ||
980 | /* | ||
981 | * Sync and unmap the frame. At this point we're | ||
982 | * committed to passing the sk_buff somewhere so | ||
983 | * clear buf_skb; this means a new sk_buff must be | ||
984 | * allocated when the rx descriptor is setup again | ||
985 | * to receive another frame. | ||
986 | */ | ||
987 | skb_put(skb, ds->ds_rxstat.rs_datalen); | 564 | skb_put(skb, ds->ds_rxstat.rs_datalen); |
988 | skb->protocol = cpu_to_be16(ETH_P_CONTROL); | 565 | skb->protocol = cpu_to_be16(ETH_P_CONTROL); |
989 | rx_status.tsf = ath_extend_tsf(sc, ds->ds_rxstat.rs_tstamp); | ||
990 | rx_status.rateieee = | ||
991 | sc->sc_hwmap[ds->ds_rxstat.rs_rate].ieeerate; | ||
992 | rx_status.rateKbps = | ||
993 | sc->sc_hwmap[ds->ds_rxstat.rs_rate].rateKbps; | ||
994 | rx_status.ratecode = ds->ds_rxstat.rs_rate; | ||
995 | 566 | ||
996 | /* HT rate */ | 567 | /* see if any padding is done by the hw and remove it */ |
997 | if (rx_status.ratecode & 0x80) { | 568 | hdr = (struct ieee80211_hdr *)skb->data; |
998 | /* TODO - add table to avoid division */ | 569 | hdrlen = ieee80211_get_hdrlen_from_skb(skb); |
999 | if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040) { | 570 | |
1000 | rx_status.flags |= ATH_RX_40MHZ; | 571 | /* The MAC header is padded to have 32-bit boundary if the |
1001 | rx_status.rateKbps = | 572 | * packet payload is non-zero. The general calculation for |
1002 | (rx_status.rateKbps * 27) / 13; | 573 | * padsize would take into account odd header lengths: |
1003 | } | 574 | * padsize = (4 - hdrlen % 4) % 4; However, since only |
1004 | if (ds->ds_rxstat.rs_flags & ATH9K_RX_GI) | 575 | * even-length headers are used, padding can only be 0 or 2 |
1005 | rx_status.rateKbps = | 576 | * bytes and we can optimize this a bit. In addition, we must |
1006 | (rx_status.rateKbps * 10) / 9; | 577 | * not try to remove padding from short control frames that do |
1007 | else | 578 | * not have payload. */ |
1008 | rx_status.flags |= ATH_RX_SHORT_GI; | 579 | padsize = hdrlen & 3; |
580 | if (padsize && hdrlen >= 24) { | ||
581 | memmove(skb->data + padsize, skb->data, hdrlen); | ||
582 | skb_pull(skb, padsize); | ||
1009 | } | 583 | } |
1010 | 584 | ||
1011 | /* sc_noise_floor is only available when the station | 585 | keyix = ds->ds_rxstat.rs_keyix; |
1012 | attaches to an AP, so we use a default value | ||
1013 | if we are not yet attached. */ | ||
1014 | rx_status.abs_rssi = | ||
1015 | ds->ds_rxstat.rs_rssi + sc->sc_ani.sc_noise_floor; | ||
1016 | |||
1017 | pci_dma_sync_single_for_cpu(sc->pdev, | ||
1018 | bf->bf_buf_addr, | ||
1019 | sc->sc_rxbufsize, | ||
1020 | PCI_DMA_FROMDEVICE); | ||
1021 | pci_unmap_single(sc->pdev, | ||
1022 | bf->bf_buf_addr, | ||
1023 | sc->sc_rxbufsize, | ||
1024 | PCI_DMA_FROMDEVICE); | ||
1025 | 586 | ||
1026 | /* XXX: Ah! make me more readable, use a helper */ | 587 | if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error) { |
1027 | if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) { | 588 | rx_status.flag |= RX_FLAG_DECRYPTED; |
1028 | if (ds->ds_rxstat.rs_moreaggr == 0) { | 589 | } else if ((le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_PROTECTED) |
1029 | rx_status.rssictl[0] = | 590 | && !decrypt_error && skb->len >= hdrlen + 4) { |
1030 | ds->ds_rxstat.rs_rssi_ctl0; | 591 | keyix = skb->data[hdrlen + 3] >> 6; |
1031 | rx_status.rssictl[1] = | 592 | |
1032 | ds->ds_rxstat.rs_rssi_ctl1; | 593 | if (test_bit(keyix, sc->sc_keymap)) |
1033 | rx_status.rssictl[2] = | 594 | rx_status.flag |= RX_FLAG_DECRYPTED; |
1034 | ds->ds_rxstat.rs_rssi_ctl2; | ||
1035 | rx_status.rssi = ds->ds_rxstat.rs_rssi; | ||
1036 | if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040) { | ||
1037 | rx_status.rssiextn[0] = | ||
1038 | ds->ds_rxstat.rs_rssi_ext0; | ||
1039 | rx_status.rssiextn[1] = | ||
1040 | ds->ds_rxstat.rs_rssi_ext1; | ||
1041 | rx_status.rssiextn[2] = | ||
1042 | ds->ds_rxstat.rs_rssi_ext2; | ||
1043 | rx_status.flags |= | ||
1044 | ATH_RX_RSSI_EXTN_VALID; | ||
1045 | } | ||
1046 | rx_status.flags |= ATH_RX_RSSI_VALID | | ||
1047 | ATH_RX_CHAIN_RSSI_VALID; | ||
1048 | } | ||
1049 | } else { | ||
1050 | /* | ||
1051 | * Need to insert the "combined" rssi into the | ||
1052 | * status structure for upper layer processing | ||
1053 | */ | ||
1054 | rx_status.rssi = ds->ds_rxstat.rs_rssi; | ||
1055 | rx_status.flags |= ATH_RX_RSSI_VALID; | ||
1056 | } | 595 | } |
1057 | 596 | ||
1058 | /* Pass frames up to the stack. */ | 597 | /* Send the frame to mac80211 */ |
598 | __ieee80211_rx(sc->hw, skb, &rx_status); | ||
1059 | 599 | ||
1060 | type = ath_rx_indicate(sc, skb, | 600 | /* We will now give hardware our shiny new allocated skb */ |
1061 | &rx_status, ds->ds_rxstat.rs_keyix); | 601 | bf->bf_mpdu = requeue_skb; |
602 | bf->bf_buf_addr = pci_map_single(sc->pdev, requeue_skb->data, | ||
603 | sc->rx.bufsize, | ||
604 | PCI_DMA_FROMDEVICE); | ||
605 | if (unlikely(pci_dma_mapping_error(sc->pdev, | ||
606 | bf->bf_buf_addr))) { | ||
607 | dev_kfree_skb_any(requeue_skb); | ||
608 | bf->bf_mpdu = NULL; | ||
609 | DPRINTF(sc, ATH_DBG_CONFIG, | ||
610 | "pci_dma_mapping_error() on RX\n"); | ||
611 | break; | ||
612 | } | ||
613 | bf->bf_dmacontext = bf->bf_buf_addr; | ||
1062 | 614 | ||
1063 | /* | 615 | /* |
1064 | * change the default rx antenna if rx diversity chooses the | 616 | * change the default rx antenna if rx diversity chooses the |
1065 | * other antenna 3 times in a row. | 617 | * other antenna 3 times in a row. |
1066 | */ | 618 | */ |
1067 | if (sc->sc_defant != ds->ds_rxstat.rs_antenna) { | 619 | if (sc->rx.defant != ds->ds_rxstat.rs_antenna) { |
1068 | if (++sc->sc_rxotherant >= 3) | 620 | if (++sc->rx.rxotherant >= 3) |
1069 | ath_setdefantenna(sc, | 621 | ath_setdefantenna(sc, ds->ds_rxstat.rs_antenna); |
1070 | ds->ds_rxstat.rs_antenna); | ||
1071 | } else { | 622 | } else { |
1072 | sc->sc_rxotherant = 0; | 623 | sc->rx.rxotherant = 0; |
1073 | } | 624 | } |
625 | requeue: | ||
626 | list_move_tail(&bf->list, &sc->rx.rxbuf); | ||
627 | ath_rx_buf_link(sc, bf); | ||
628 | } while (1); | ||
1074 | 629 | ||
1075 | #ifdef CONFIG_SLOW_ANT_DIV | 630 | spin_unlock_bh(&sc->rx.rxbuflock); |
1076 | if ((rx_status.flags & ATH_RX_RSSI_VALID) && | ||
1077 | ieee80211_is_beacon(fc)) { | ||
1078 | ath_slow_ant_div(&sc->sc_antdiv, hdr, &ds->ds_rxstat); | ||
1079 | } | ||
1080 | #endif | ||
1081 | /* | ||
1082 | * For frames successfully indicated, the buffer will be | ||
1083 | * returned to us by upper layers by calling | ||
1084 | * ath_rx_mpdu_requeue, either synchronusly or asynchronously. | ||
1085 | * So we don't want to do it here in this loop. | ||
1086 | */ | ||
1087 | continue; | ||
1088 | |||
1089 | rx_next: | ||
1090 | bf->bf_status |= ATH_BUFSTATUS_FREE; | ||
1091 | } while (TRUE); | ||
1092 | |||
1093 | if (chainreset) { | ||
1094 | DPRINTF(sc, ATH_DBG_CONFIG, | ||
1095 | "%s: Reset rx chain mask. " | ||
1096 | "Do internal reset\n", __func__); | ||
1097 | ASSERT(flush == 0); | ||
1098 | ath_reset(sc, false); | ||
1099 | } | ||
1100 | 631 | ||
1101 | return 0; | 632 | return 0; |
1102 | #undef PA2DESC | 633 | #undef PA2DESC |
1103 | } | 634 | } |
1104 | |||
1105 | /* Process ADDBA request in per-TID data structure */ | ||
1106 | |||
1107 | int ath_rx_aggr_start(struct ath_softc *sc, | ||
1108 | const u8 *addr, | ||
1109 | u16 tid, | ||
1110 | u16 *ssn) | ||
1111 | { | ||
1112 | struct ath_arx_tid *rxtid; | ||
1113 | struct ath_node *an; | ||
1114 | struct ieee80211_hw *hw = sc->hw; | ||
1115 | struct ieee80211_supported_band *sband; | ||
1116 | u16 buffersize = 0; | ||
1117 | |||
1118 | spin_lock_bh(&sc->node_lock); | ||
1119 | an = ath_node_find(sc, (u8 *) addr); | ||
1120 | spin_unlock_bh(&sc->node_lock); | ||
1121 | |||
1122 | if (!an) { | ||
1123 | DPRINTF(sc, ATH_DBG_AGGR, | ||
1124 | "%s: Node not found to initialize RX aggregation\n", | ||
1125 | __func__); | ||
1126 | return -1; | ||
1127 | } | ||
1128 | |||
1129 | sband = hw->wiphy->bands[hw->conf.channel->band]; | ||
1130 | buffersize = IEEE80211_MIN_AMPDU_BUF << | ||
1131 | sband->ht_info.ampdu_factor; /* FIXME */ | ||
1132 | |||
1133 | rxtid = &an->an_aggr.rx.tid[tid]; | ||
1134 | |||
1135 | spin_lock_bh(&rxtid->tidlock); | ||
1136 | if (sc->sc_flags & SC_OP_RXAGGR) { | ||
1137 | /* Allow aggregation reception | ||
1138 | * Adjust rx BA window size. Peer might indicate a | ||
1139 | * zero buffer size for a _dont_care_ condition. | ||
1140 | */ | ||
1141 | if (buffersize) | ||
1142 | rxtid->baw_size = min(buffersize, rxtid->baw_size); | ||
1143 | |||
1144 | /* set rx sequence number */ | ||
1145 | rxtid->seq_next = *ssn; | ||
1146 | |||
1147 | /* Allocate the receive buffers for this TID */ | ||
1148 | DPRINTF(sc, ATH_DBG_AGGR, | ||
1149 | "%s: Allcating rxbuffer for TID %d\n", __func__, tid); | ||
1150 | |||
1151 | if (rxtid->rxbuf == NULL) { | ||
1152 | /* | ||
1153 | * If the rxbuff is not NULL at this point, we *probably* | ||
1154 | * already allocated the buffer on a previous ADDBA, | ||
1155 | * and this is a subsequent ADDBA that got through. | ||
1156 | * Don't allocate, but use the value in the pointer, | ||
1157 | * we zero it out when we de-allocate. | ||
1158 | */ | ||
1159 | rxtid->rxbuf = kmalloc(ATH_TID_MAX_BUFS * | ||
1160 | sizeof(struct ath_rxbuf), GFP_ATOMIC); | ||
1161 | } | ||
1162 | if (rxtid->rxbuf == NULL) { | ||
1163 | DPRINTF(sc, ATH_DBG_AGGR, | ||
1164 | "%s: Unable to allocate RX buffer, " | ||
1165 | "refusing ADDBA\n", __func__); | ||
1166 | } else { | ||
1167 | /* Ensure the memory is zeroed out (all internal | ||
1168 | * pointers are null) */ | ||
1169 | memset(rxtid->rxbuf, 0, ATH_TID_MAX_BUFS * | ||
1170 | sizeof(struct ath_rxbuf)); | ||
1171 | DPRINTF(sc, ATH_DBG_AGGR, | ||
1172 | "%s: Allocated @%p\n", __func__, rxtid->rxbuf); | ||
1173 | |||
1174 | /* Allow aggregation reception */ | ||
1175 | rxtid->addba_exchangecomplete = 1; | ||
1176 | } | ||
1177 | } | ||
1178 | spin_unlock_bh(&rxtid->tidlock); | ||
1179 | |||
1180 | return 0; | ||
1181 | } | ||
1182 | |||
1183 | /* Process DELBA */ | ||
1184 | |||
1185 | int ath_rx_aggr_stop(struct ath_softc *sc, | ||
1186 | const u8 *addr, | ||
1187 | u16 tid) | ||
1188 | { | ||
1189 | struct ath_node *an; | ||
1190 | |||
1191 | spin_lock_bh(&sc->node_lock); | ||
1192 | an = ath_node_find(sc, (u8 *) addr); | ||
1193 | spin_unlock_bh(&sc->node_lock); | ||
1194 | |||
1195 | if (!an) { | ||
1196 | DPRINTF(sc, ATH_DBG_AGGR, | ||
1197 | "%s: RX aggr stop for non-existent node\n", __func__); | ||
1198 | return -1; | ||
1199 | } | ||
1200 | |||
1201 | ath_rx_aggr_teardown(sc, an, tid); | ||
1202 | return 0; | ||
1203 | } | ||
1204 | |||
1205 | /* Rx aggregation tear down */ | ||
1206 | |||
1207 | void ath_rx_aggr_teardown(struct ath_softc *sc, | ||
1208 | struct ath_node *an, u8 tid) | ||
1209 | { | ||
1210 | struct ath_arx_tid *rxtid = &an->an_aggr.rx.tid[tid]; | ||
1211 | |||
1212 | if (!rxtid->addba_exchangecomplete) | ||
1213 | return; | ||
1214 | |||
1215 | del_timer_sync(&rxtid->timer); | ||
1216 | ath_rx_flush_tid(sc, rxtid, 0); | ||
1217 | rxtid->addba_exchangecomplete = 0; | ||
1218 | |||
1219 | /* De-allocate the receive buffer array allocated when addba started */ | ||
1220 | |||
1221 | if (rxtid->rxbuf) { | ||
1222 | DPRINTF(sc, ATH_DBG_AGGR, | ||
1223 | "%s: Deallocating TID %d rxbuff @%p\n", | ||
1224 | __func__, tid, rxtid->rxbuf); | ||
1225 | kfree(rxtid->rxbuf); | ||
1226 | |||
1227 | /* Set pointer to null to avoid reuse*/ | ||
1228 | rxtid->rxbuf = NULL; | ||
1229 | } | ||
1230 | } | ||
1231 | |||
1232 | /* Initialize per-node receive state */ | ||
1233 | |||
1234 | void ath_rx_node_init(struct ath_softc *sc, struct ath_node *an) | ||
1235 | { | ||
1236 | if (sc->sc_flags & SC_OP_RXAGGR) { | ||
1237 | struct ath_arx_tid *rxtid; | ||
1238 | int tidno; | ||
1239 | |||
1240 | /* Init per tid rx state */ | ||
1241 | for (tidno = 0, rxtid = &an->an_aggr.rx.tid[tidno]; | ||
1242 | tidno < WME_NUM_TID; | ||
1243 | tidno++, rxtid++) { | ||
1244 | rxtid->an = an; | ||
1245 | rxtid->seq_reset = 1; | ||
1246 | rxtid->seq_next = 0; | ||
1247 | rxtid->baw_size = WME_MAX_BA; | ||
1248 | rxtid->baw_head = rxtid->baw_tail = 0; | ||
1249 | |||
1250 | /* | ||
1251 | * Ensure the buffer pointer is null at this point | ||
1252 | * (needs to be allocated when addba is received) | ||
1253 | */ | ||
1254 | |||
1255 | rxtid->rxbuf = NULL; | ||
1256 | setup_timer(&rxtid->timer, ath_rx_timer, | ||
1257 | (unsigned long)rxtid); | ||
1258 | spin_lock_init(&rxtid->tidlock); | ||
1259 | |||
1260 | /* ADDBA state */ | ||
1261 | rxtid->addba_exchangecomplete = 0; | ||
1262 | } | ||
1263 | } | ||
1264 | } | ||
1265 | |||
1266 | void ath_rx_node_cleanup(struct ath_softc *sc, struct ath_node *an) | ||
1267 | { | ||
1268 | if (sc->sc_flags & SC_OP_RXAGGR) { | ||
1269 | struct ath_arx_tid *rxtid; | ||
1270 | int tidno, i; | ||
1271 | |||
1272 | /* Init per tid rx state */ | ||
1273 | for (tidno = 0, rxtid = &an->an_aggr.rx.tid[tidno]; | ||
1274 | tidno < WME_NUM_TID; | ||
1275 | tidno++, rxtid++) { | ||
1276 | |||
1277 | if (!rxtid->addba_exchangecomplete) | ||
1278 | continue; | ||
1279 | |||
1280 | /* must cancel timer first */ | ||
1281 | del_timer_sync(&rxtid->timer); | ||
1282 | |||
1283 | /* drop any pending sub-frames */ | ||
1284 | ath_rx_flush_tid(sc, rxtid, 1); | ||
1285 | |||
1286 | for (i = 0; i < ATH_TID_MAX_BUFS; i++) | ||
1287 | ASSERT(rxtid->rxbuf[i].rx_wbuf == NULL); | ||
1288 | |||
1289 | rxtid->addba_exchangecomplete = 0; | ||
1290 | } | ||
1291 | } | ||
1292 | |||
1293 | } | ||
1294 | |||
1295 | /* Cleanup per-node receive state */ | ||
1296 | |||
1297 | void ath_rx_node_free(struct ath_softc *sc, struct ath_node *an) | ||
1298 | { | ||
1299 | ath_rx_node_cleanup(sc, an); | ||
1300 | } | ||
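
The rx timestamp widening performed by the new ath_extend_tsf() above can be tried on its own; below is a minimal standalone sketch in plain C (the helper name extend_tsf and the sample TSF/timestamp values are illustrative, not taken from the patch). It shows the idea: the low 15 bits of the current hardware TSF are replaced by the descriptor's rx timestamp, and the upper bits are stepped back by one 0x8000 window whenever the TSF has already wrapped past the frame's stamp.

#include <stdint.h>
#include <stdio.h>

/* Replace the low 15 bits of the 64-bit TSF with the 15-bit rx stamp.
 * If the TSF's low bits have already wrapped past the stamp, the frame
 * was received in the previous 0x8000-microsecond window, so step the
 * upper bits back by one window before splicing the stamp in. */
static uint64_t extend_tsf(uint64_t tsf, uint32_t rstamp)
{
	if ((tsf & 0x7fff) < rstamp)
		tsf -= 0x8000;
	return (tsf & ~0x7fffULL) | rstamp;
}

int main(void)
{
	/* Illustrative values: the TSF is read shortly after its 15-bit
	 * window wrapped, while the frame was stamped just before the wrap. */
	uint64_t tsf = 0x12340012ULL;
	uint32_t rstamp = 0x7ff0;

	printf("mactime = 0x%llx\n",
	       (unsigned long long)extend_tsf(tsf, rstamp));
	/* prints mactime = 0x1233fff0, i.e. 34 us before the TSF read */
	return 0;
}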