author     Sujith <Sujith.Manoharan@atheros.com>  2008-10-29 00:49:01 -0400
committer  John W. Linville <linville@tuxdriver.com>  2008-11-10 15:17:32 -0500
commit     dca3edb88ef567671886a85c5e40d491ccecf934
tree       14a8a876decc06313ba548080dbb9c2740b9358f /drivers/net
parent     7f959032bbee5eedaf4c606d11f0c5d41498644e
ath9k: Remove internal RX A-MPDU processing
mac80211 has RX A-MPDU reordering support. Use that and remove
redundant RX processing within the driver.

Signed-off-by: Sujith <Sujith.Manoharan@atheros.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
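The driver-side effect is easiest to see in the ampdu_action callback: the RX
cases stop calling into the driver's own reorder machinery and only report
whether the hardware can do RX aggregation, leaving reordering to mac80211.
Below is a minimal sketch of the resulting RX handling, condensed from the
main.c hunk further down; the helper name ath9k_ampdu_rx_action and the
abridged signature are illustrative only, while SC_OP_RXAGGR and the action
values appear in the diff itself.

/* Sketch only: shape of the RX A-MPDU handling after this patch,
 * condensed from the main.c hunk below. Hypothetical helper; TX
 * cases omitted. */
static int ath9k_ampdu_rx_action(struct ath_softc *sc,
                                 enum ieee80211_ampdu_mlme_action action)
{
        int ret = 0;

        switch (action) {
        case IEEE80211_AMPDU_RX_START:
                /* Only report capability; mac80211 owns the reorder buffer. */
                if (!(sc->sc_flags & SC_OP_RXAGGR))
                        ret = -ENOTSUPP;
                break;
        case IEEE80211_AMPDU_RX_STOP:
                /* No driver-side per-TID state left to flush. */
                break;
        default:
                break;
        }

        return ret;
}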
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/wireless/ath9k/core.c  |   6
-rw-r--r--  drivers/net/wireless/ath9k/core.h  |  43
-rw-r--r--  drivers/net/wireless/ath9k/main.c  |  36
-rw-r--r--  drivers/net/wireless/ath9k/recv.c  | 501
4 files changed, 2 insertions, 584 deletions
diff --git a/drivers/net/wireless/ath9k/core.c b/drivers/net/wireless/ath9k/core.c
index d13844647f93..5f5184acb274 100644
--- a/drivers/net/wireless/ath9k/core.c
+++ b/drivers/net/wireless/ath9k/core.c
@@ -1189,8 +1189,6 @@ void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta)
 
         if (sc->sc_flags & SC_OP_TXAGGR)
                 ath_tx_node_init(sc, an);
-        if (sc->sc_flags & SC_OP_RXAGGR)
-                ath_rx_node_init(sc, an);
 
         an->maxampdu = 1 << (IEEE80211_HTCAP_MAXRXAMPDU_FACTOR +
                         sta->ht_cap.ampdu_factor);
@@ -1208,8 +1206,6 @@ void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
 
         if (sc->sc_flags & SC_OP_TXAGGR)
                 ath_tx_node_cleanup(sc, an);
-        if (sc->sc_flags & SC_OP_RXAGGR)
-                ath_rx_node_cleanup(sc, an);
 }
 
 /*
@@ -1230,8 +1226,6 @@ void ath_newassoc(struct ath_softc *sc,
                 for (tidno = 0; tidno < WME_NUM_TID; tidno++) {
                         if (sc->sc_flags & SC_OP_TXAGGR)
                                 ath_tx_aggr_teardown(sc, an, tidno);
-                        if (sc->sc_flags & SC_OP_RXAGGR)
-                                ath_rx_aggr_teardown(sc, an, tidno);
                 }
         }
 }
diff --git a/drivers/net/wireless/ath9k/core.h b/drivers/net/wireless/ath9k/core.h
index dd33cb7f2ca6..5b17e88ab9a9 100644
--- a/drivers/net/wireless/ath9k/core.h
+++ b/drivers/net/wireless/ath9k/core.h
@@ -304,15 +304,7 @@ void ath_descdma_cleanup(struct ath_softc *sc,
 
 #define ATH_MAX_ANTENNA         3
 #define ATH_RXBUF               512
-#define ATH_RX_TIMEOUT          40      /* 40 milliseconds */
 #define WME_NUM_TID             16
-#define IEEE80211_BAR_CTL_TID_M 0xF000  /* tid mask */
-#define IEEE80211_BAR_CTL_TID_S 12      /* tid shift */
-
-enum ATH_RX_TYPE {
-        ATH_RX_NON_CONSUMED = 0,
-        ATH_RX_CONSUMED
-};
 
 /* per frame rx status block */
 struct ath_recv_status {
@@ -346,47 +338,18 @@ struct ath_rxbuf {
         struct ath_recv_status rx_status;       /* cached rx status */
 };
 
-/* Per-TID aggregate receiver state for a node */
-struct ath_arx_tid {
-        struct ath_node *an;
-        struct ath_rxbuf *rxbuf;        /* re-ordering buffer */
-        struct timer_list timer;
-        spinlock_t tidlock;
-        int baw_head;                   /* seq_next at head */
-        int baw_tail;                   /* tail of block-ack window */
-        int seq_reset;                  /* need to reset start sequence */
-        int addba_exchangecomplete;
-        u16 seq_next;                   /* next expected sequence */
-        u16 baw_size;                   /* block-ack window size */
-};
-
-/* Per-node receiver aggregate state */
-struct ath_arx {
-        struct ath_arx_tid tid[WME_NUM_TID];
-};
-
 int ath_startrecv(struct ath_softc *sc);
 bool ath_stoprecv(struct ath_softc *sc);
 void ath_flushrecv(struct ath_softc *sc);
 u32 ath_calcrxfilter(struct ath_softc *sc);
-void ath_rx_node_init(struct ath_softc *sc, struct ath_node *an);
-void ath_rx_node_cleanup(struct ath_softc *sc, struct ath_node *an);
 void ath_handle_rx_intr(struct ath_softc *sc);
 int ath_rx_init(struct ath_softc *sc, int nbufs);
 void ath_rx_cleanup(struct ath_softc *sc);
 int ath_rx_tasklet(struct ath_softc *sc, int flush);
-int ath_rx_input(struct ath_softc *sc,
-                 struct ath_node *node,
-                 struct sk_buff *skb,
-                 struct ath_recv_status *rx_status,
-                 enum ATH_RX_TYPE *status);
 int _ath_rx_indicate(struct ath_softc *sc,
                      struct sk_buff *skb,
                      struct ath_recv_status *status,
                      u16 keyix);
-int ath_rx_subframe(struct ath_node *an, struct sk_buff *skb,
-                    struct ath_recv_status *status);
-
 /******/
 /* TX */
 /******/
@@ -599,7 +562,6 @@ struct aggr_rifs_param {
 /* Per-node aggregation state */
 struct ath_node_aggr {
         struct ath_atx tx;      /* node transmit state */
-        struct ath_arx rx;      /* node receive state */
 };
 
 /* driver-specific node state */
@@ -616,11 +578,6 @@ void ath_tx_resume_tid(struct ath_softc *sc,
 bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno);
 void ath_tx_aggr_teardown(struct ath_softc *sc,
                           struct ath_node *an, u8 tidno);
-void ath_rx_aggr_teardown(struct ath_softc *sc,
-                          struct ath_node *an, u8 tidno);
-int ath_rx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
-                      u16 tid, u16 *ssn);
-int ath_rx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
 int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
                       u16 tid, u16 *ssn);
 int ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
diff --git a/drivers/net/wireless/ath9k/main.c b/drivers/net/wireless/ath9k/main.c
index e177de47e3e0..65a532e08ecd 100644
--- a/drivers/net/wireless/ath9k/main.c
+++ b/drivers/net/wireless/ath9k/main.c
@@ -444,12 +444,10 @@ int _ath_rx_indicate(struct ath_softc *sc,
                      u16 keyix)
 {
         struct ieee80211_hw *hw = sc->hw;
-        struct ath_node *an = NULL;
         struct ieee80211_rx_status rx_status;
         struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
         int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
         int padsize;
-        enum ATH_RX_TYPE st;
 
         /* see if any padding is done by the hw and remove it */
         if (hdrlen & 3) {
@@ -473,28 +471,6 @@ int _ath_rx_indicate(struct ath_softc *sc,
                 rx_status.flag |= RX_FLAG_DECRYPTED;
         }
 
-        if (an) {
-                ath_rx_input(sc, an,
-                             skb, status, &st);
-        }
-        if (!an || (st != ATH_RX_CONSUMED))
-                __ieee80211_rx(hw, skb, &rx_status);
-
-        return 0;
-}
-
-int ath_rx_subframe(struct ath_node *an, struct sk_buff *skb,
-                    struct ath_recv_status *status)
-{
-        struct ath_softc *sc = an->an_sc;
-        struct ieee80211_hw *hw = sc->hw;
-        struct ieee80211_rx_status rx_status;
-
-        /* Prepare rx status */
-        ath9k_rx_prepare(sc, skb, status, &rx_status);
-        if (!(status->flags & ATH_RX_DECRYPT_ERROR))
-                rx_status.flag |= RX_FLAG_DECRYPTED;
-
         __ieee80211_rx(hw, skb, &rx_status);
 
         return 0;
@@ -1483,18 +1459,10 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
 
         switch (action) {
         case IEEE80211_AMPDU_RX_START:
-                ret = ath_rx_aggr_start(sc, sta, tid, ssn);
-                if (ret < 0)
-                        DPRINTF(sc, ATH_DBG_FATAL,
-                                "%s: Unable to start RX aggregation\n",
-                                __func__);
+                if (!(sc->sc_flags & SC_OP_RXAGGR))
+                        ret = -ENOTSUPP;
                 break;
         case IEEE80211_AMPDU_RX_STOP:
-                ret = ath_rx_aggr_stop(sc, sta, tid);
-                if (ret < 0)
-                        DPRINTF(sc, ATH_DBG_FATAL,
-                                "%s: Unable to stop RX aggregation\n",
-                                __func__);
                 break;
         case IEEE80211_AMPDU_TX_START:
                 ret = ath_tx_aggr_start(sc, sta, tid, ssn);
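Taken together, the main.c hunks reduce the per-frame indication to an
unconditional hand-off: every prepared frame now goes straight to the stack,
which performs any A-MPDU reordering. Schematically, as a sketch using only
calls visible in the hunks above (not a standalone function):

        /* Per-frame RX tail after the patch, as in the first main.c hunk.
         * No ath_node lookup, no ATH_RX_CONSUMED branching. */
        ath9k_rx_prepare(sc, skb, status, &rx_status);
        if (!(status->flags & ATH_RX_DECRYPT_ERROR))
                rx_status.flag |= RX_FLAG_DECRYPTED;
        __ieee80211_rx(hw, skb, &rx_status);    /* mac80211 reorders A-MPDUs */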
diff --git a/drivers/net/wireless/ath9k/recv.c b/drivers/net/wireless/ath9k/recv.c
index 80f26b4c4e06..2ecb0a010ce2 100644
--- a/drivers/net/wireless/ath9k/recv.c
+++ b/drivers/net/wireless/ath9k/recv.c
@@ -64,328 +64,6 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
         ath9k_hw_rxena(ah);
 }
 
-/* Process received BAR frame */
-
-static int ath_bar_rx(struct ath_softc *sc,
-                      struct ath_node *an,
-                      struct sk_buff *skb)
-{
-        struct ieee80211_bar *bar;
-        struct ath_arx_tid *rxtid;
-        struct sk_buff *tskb;
-        struct ath_recv_status *rx_status;
-        int tidno, index, cindex;
-        u16 seqno;
-
-        /* look at BAR contents */
-
-        bar = (struct ieee80211_bar *)skb->data;
-        tidno = (le16_to_cpu(bar->control) & IEEE80211_BAR_CTL_TID_M)
-                >> IEEE80211_BAR_CTL_TID_S;
-        seqno = le16_to_cpu(bar->start_seq_num) >> IEEE80211_SEQ_SEQ_SHIFT;
-
-        /* process BAR - indicate all pending RX frames till the BAR seqno */
-
-        rxtid = &an->an_aggr.rx.tid[tidno];
-
-        spin_lock_bh(&rxtid->tidlock);
-
-        /* get relative index */
-
-        index = ATH_BA_INDEX(rxtid->seq_next, seqno);
-
-        /* drop BAR if old sequence (index is too large) */
-
-        if ((index > rxtid->baw_size) &&
-            (index > (IEEE80211_SEQ_MAX - (rxtid->baw_size << 2))))
-                /* discard frame, ieee layer may not treat frame as a dup */
-                goto unlock_and_free;
-
-        /* complete receive processing for all pending frames upto BAR seqno */
-
-        cindex = (rxtid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
-        while ((rxtid->baw_head != rxtid->baw_tail) &&
-               (rxtid->baw_head != cindex)) {
-                tskb = rxtid->rxbuf[rxtid->baw_head].rx_wbuf;
-                rx_status = &rxtid->rxbuf[rxtid->baw_head].rx_status;
-                rxtid->rxbuf[rxtid->baw_head].rx_wbuf = NULL;
-
-                if (tskb != NULL)
-                        ath_rx_subframe(an, tskb, rx_status);
-
-                INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
-                INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
-        }
-
-        /* ... and indicate rest of the frames in-order */
-
-        while (rxtid->baw_head != rxtid->baw_tail &&
-               rxtid->rxbuf[rxtid->baw_head].rx_wbuf != NULL) {
-                tskb = rxtid->rxbuf[rxtid->baw_head].rx_wbuf;
-                rx_status = &rxtid->rxbuf[rxtid->baw_head].rx_status;
-                rxtid->rxbuf[rxtid->baw_head].rx_wbuf = NULL;
-
-                ath_rx_subframe(an, tskb, rx_status);
-
-                INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
-                INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
-        }
-
-unlock_and_free:
-        spin_unlock_bh(&rxtid->tidlock);
-        /* free bar itself */
-        dev_kfree_skb(skb);
-        return IEEE80211_FTYPE_CTL;
-}
-
-/* Function to handle a subframe of aggregation when HT is enabled */
-
-static int ath_ampdu_input(struct ath_softc *sc,
-                           struct ath_node *an,
-                           struct sk_buff *skb,
-                           struct ath_recv_status *rx_status)
-{
-        struct ieee80211_hdr *hdr;
-        struct ath_arx_tid *rxtid;
-        struct ath_rxbuf *rxbuf;
-        u8 type, subtype;
-        u16 rxseq;
-        int tid = 0, index, cindex, rxdiff;
-        __le16 fc;
-        u8 *qc;
-
-        hdr = (struct ieee80211_hdr *)skb->data;
-        fc = hdr->frame_control;
-
-        /* collect stats of frames with non-zero version */
-
-        if ((le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_VERS) != 0) {
-                dev_kfree_skb(skb);
-                return -1;
-        }
-
-        type = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_FTYPE;
-        subtype = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_STYPE;
-
-        if (ieee80211_is_back_req(fc))
-                return ath_bar_rx(sc, an, skb);
-
-        /* special aggregate processing only for qos unicast data frames */
-
-        if (!ieee80211_is_data(fc) ||
-            !ieee80211_is_data_qos(fc) ||
-            is_multicast_ether_addr(hdr->addr1))
-                return ath_rx_subframe(an, skb, rx_status);
-
-        /* lookup rx tid state */
-
-        if (ieee80211_is_data_qos(fc)) {
-                qc = ieee80211_get_qos_ctl(hdr);
-                tid = qc[0] & 0xf;
-        }
-
-        if (sc->sc_ah->ah_opmode == ATH9K_M_STA) {
-                /* Drop the frame not belonging to me. */
-                if (memcmp(hdr->addr1, sc->sc_myaddr, ETH_ALEN)) {
-                        dev_kfree_skb(skb);
-                        return -1;
-                }
-        }
-
-        rxtid = &an->an_aggr.rx.tid[tid];
-
-        spin_lock(&rxtid->tidlock);
-
-        rxdiff = (rxtid->baw_tail - rxtid->baw_head) &
-                (ATH_TID_MAX_BUFS - 1);
-
-        /*
-         * If the ADDBA exchange has not been completed by the source,
-         * process via legacy path (i.e. no reordering buffer is needed)
-         */
-        if (!rxtid->addba_exchangecomplete) {
-                spin_unlock(&rxtid->tidlock);
-                return ath_rx_subframe(an, skb, rx_status);
-        }
-
-        /* extract sequence number from recvd frame */
-
-        rxseq = le16_to_cpu(hdr->seq_ctrl) >> IEEE80211_SEQ_SEQ_SHIFT;
-
-        if (rxtid->seq_reset) {
-                rxtid->seq_reset = 0;
-                rxtid->seq_next = rxseq;
-        }
-
-        index = ATH_BA_INDEX(rxtid->seq_next, rxseq);
-
-        /* drop frame if old sequence (index is too large) */
-
-        if (index > (IEEE80211_SEQ_MAX - (rxtid->baw_size << 2))) {
-                /* discard frame, ieee layer may not treat frame as a dup */
-                spin_unlock(&rxtid->tidlock);
-                dev_kfree_skb(skb);
-                return IEEE80211_FTYPE_DATA;
-        }
-
-        /* sequence number is beyond block-ack window */
-
-        if (index >= rxtid->baw_size) {
-
-                /* complete receive processing for all pending frames */
-
-                while (index >= rxtid->baw_size) {
-
-                        rxbuf = rxtid->rxbuf + rxtid->baw_head;
-
-                        if (rxbuf->rx_wbuf != NULL) {
-                                ath_rx_subframe(an, rxbuf->rx_wbuf,
-                                        &rxbuf->rx_status);
-                                rxbuf->rx_wbuf = NULL;
-                        }
-
-                        INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
-                        INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
-
-                        index--;
-                }
-        }
-
-        /* add buffer to the recv ba window */
-
-        cindex = (rxtid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
-        rxbuf = rxtid->rxbuf + cindex;
-
-        if (rxbuf->rx_wbuf != NULL) {
-                spin_unlock(&rxtid->tidlock);
-                /* duplicate frame */
-                dev_kfree_skb(skb);
-                return IEEE80211_FTYPE_DATA;
-        }
-
-        rxbuf->rx_wbuf = skb;
-        rxbuf->rx_time = get_timestamp();
-        rxbuf->rx_status = *rx_status;
-
-        /* advance tail if sequence received is newer
-         * than any received so far */
-
-        if (index >= rxdiff) {
-                rxtid->baw_tail = cindex;
-                INCR(rxtid->baw_tail, ATH_TID_MAX_BUFS);
-        }
-
-        /* indicate all in-order received frames */
-
-        while (rxtid->baw_head != rxtid->baw_tail) {
-                rxbuf = rxtid->rxbuf + rxtid->baw_head;
-                if (!rxbuf->rx_wbuf)
-                        break;
-
-                ath_rx_subframe(an, rxbuf->rx_wbuf, &rxbuf->rx_status);
-                rxbuf->rx_wbuf = NULL;
-
-                INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
-                INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
-        }
-
-        /*
-         * start a timer to flush all received frames if there are pending
-         * receive frames
-         */
-        if (rxtid->baw_head != rxtid->baw_tail)
-                mod_timer(&rxtid->timer, ATH_RX_TIMEOUT);
-        else
-                del_timer_sync(&rxtid->timer);
-
-        spin_unlock(&rxtid->tidlock);
-        return IEEE80211_FTYPE_DATA;
-}
-
-/* Timer to flush all received sub-frames */
-
-static void ath_rx_timer(unsigned long data)
-{
-        struct ath_arx_tid *rxtid = (struct ath_arx_tid *)data;
-        struct ath_node *an = rxtid->an;
-        struct ath_rxbuf *rxbuf;
-        int nosched;
-
-        spin_lock_bh(&rxtid->tidlock);
-        while (rxtid->baw_head != rxtid->baw_tail) {
-                rxbuf = rxtid->rxbuf + rxtid->baw_head;
-                if (!rxbuf->rx_wbuf) {
-                        INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
-                        INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
-                        continue;
-                }
-
-                /*
-                 * Stop if the next one is a very recent frame.
-                 *
-                 * Call get_timestamp in every iteration to protect against the
-                 * case in which a new frame is received while we are executing
-                 * this function. Using a timestamp obtained before entering
-                 * the loop could lead to a very large time interval
-                 * (a negative value typecast to unsigned), breaking the
-                 * function's logic.
-                 */
-                if ((get_timestamp() - rxbuf->rx_time) <
-                        (ATH_RX_TIMEOUT * HZ / 1000))
-                        break;
-
-                ath_rx_subframe(an, rxbuf->rx_wbuf,
-                                &rxbuf->rx_status);
-                rxbuf->rx_wbuf = NULL;
-
-                INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
-                INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
-        }
-
-        /*
-         * start a timer to flush all received frames if there are pending
-         * receive frames
-         */
-        if (rxtid->baw_head != rxtid->baw_tail)
-                nosched = 0;
-        else
-                nosched = 1; /* no need to re-arm the timer again */
-
-        spin_unlock_bh(&rxtid->tidlock);
-}
-
-/* Free all pending sub-frames in the re-ordering buffer */
-
-static void ath_rx_flush_tid(struct ath_softc *sc, struct ath_arx_tid *rxtid,
-                             int drop)
-{
-        struct ath_rxbuf *rxbuf;
-        unsigned long flag;
-
-        spin_lock_irqsave(&rxtid->tidlock, flag);
-        while (rxtid->baw_head != rxtid->baw_tail) {
-                rxbuf = rxtid->rxbuf + rxtid->baw_head;
-                if (!rxbuf->rx_wbuf) {
-                        INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
-                        INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
-                        continue;
-                }
-
-                if (drop)
-                        dev_kfree_skb(rxbuf->rx_wbuf);
-                else
-                        ath_rx_subframe(rxtid->an,
-                                        rxbuf->rx_wbuf,
-                                        &rxbuf->rx_status);
-
-                rxbuf->rx_wbuf = NULL;
-
-                INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
-                INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
-        }
-        spin_unlock_irqrestore(&rxtid->tidlock, flag);
-}
-
 static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc,
                                        u32 len)
 {
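For readers following the reorder logic removed in the hunk above: the
block-ack window is a circular buffer of ATH_TID_MAX_BUFS slots, and a frame's
slot is derived from its distance to the next expected sequence number, modulo
the 12-bit 802.11 sequence space. A small standalone sketch of that arithmetic
follows; ba_index mirrors what ATH_BA_INDEX is used for above, but the macro
body and the slot count are assumptions, not taken from this patch.

#include <stdio.h>

#define IEEE80211_SEQ_MAX       4096    /* 12-bit 802.11 sequence space */
#define ATH_TID_MAX_BUFS        64      /* reorder slots (assumed value) */

/* Distance from the window start to a received sequence number,
 * wrapping within the sequence space. */
static int ba_index(int seq_next, int seq)
{
        return (seq - seq_next) & (IEEE80211_SEQ_MAX - 1);
}

int main(void)
{
        int seq_next = 4090;            /* next expected sequence */
        int rxseq = 3;                  /* received after wrap-around */
        int index = ba_index(seq_next, rxseq);  /* 9: still in-window */
        int baw_head = 0;               /* current head slot */
        int cindex = (baw_head + index) & (ATH_TID_MAX_BUFS - 1);

        printf("index=%d slot=%d\n", index, cindex);
        return 0;
}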
@@ -716,23 +394,6 @@ void ath_flushrecv(struct ath_softc *sc)
         spin_unlock_bh(&sc->sc_rxflushlock);
 }
 
-/* Process an individual frame */
-
-int ath_rx_input(struct ath_softc *sc,
-                 struct ath_node *an,
-                 struct sk_buff *skb,
-                 struct ath_recv_status *rx_status,
-                 enum ATH_RX_TYPE *status)
-{
-        if (sc->sc_flags & SC_OP_RXAGGR) {
-                *status = ATH_RX_CONSUMED;
-                return ath_ampdu_input(sc, an, skb, rx_status);
-        } else {
-                *status = ATH_RX_NON_CONSUMED;
-                return -1;
-        }
-}
-
 /* Process receive queue, as well as LED, etc. */
 
 int ath_rx_tasklet(struct ath_softc *sc, int flush)
@@ -1091,165 +752,3 @@ rx_next:
         return 0;
 #undef PA2DESC
 }
-
-/* Process ADDBA request in per-TID data structure */
-
-int ath_rx_aggr_start(struct ath_softc *sc,
-                      struct ieee80211_sta *sta,
-                      u16 tid,
-                      u16 *ssn)
-{
-        struct ath_arx_tid *rxtid;
-        struct ath_node *an;
-        struct ieee80211_hw *hw = sc->hw;
-        struct ieee80211_supported_band *sband;
-        u16 buffersize = 0;
-
-        an = (struct ath_node *)sta->drv_priv;
-        sband = hw->wiphy->bands[hw->conf.channel->band];
-        buffersize = IEEE80211_MIN_AMPDU_BUF <<
-                sband->ht_cap.ampdu_factor; /* FIXME */
-
-        rxtid = &an->an_aggr.rx.tid[tid];
-
-        spin_lock_bh(&rxtid->tidlock);
-        if (sc->sc_flags & SC_OP_RXAGGR) {
-                /* Allow aggregation reception
-                 * Adjust rx BA window size. Peer might indicate a
-                 * zero buffer size for a _dont_care_ condition.
-                 */
-                if (buffersize)
-                        rxtid->baw_size = min(buffersize, rxtid->baw_size);
-
-                /* set rx sequence number */
-                rxtid->seq_next = *ssn;
-
-                /* Allocate the receive buffers for this TID */
-                DPRINTF(sc, ATH_DBG_AGGR,
-                        "%s: Allcating rxbuffer for TID %d\n", __func__, tid);
-
-                if (rxtid->rxbuf == NULL) {
-                        /*
-                         * If the rxbuff is not NULL at this point, we *probably*
-                         * already allocated the buffer on a previous ADDBA,
-                         * and this is a subsequent ADDBA that got through.
-                         * Don't allocate, but use the value in the pointer,
-                         * we zero it out when we de-allocate.
-                         */
-                        rxtid->rxbuf = kmalloc(ATH_TID_MAX_BUFS *
-                                sizeof(struct ath_rxbuf), GFP_ATOMIC);
-                }
-                if (rxtid->rxbuf == NULL) {
-                        DPRINTF(sc, ATH_DBG_AGGR,
-                                "%s: Unable to allocate RX buffer, "
-                                "refusing ADDBA\n", __func__);
-                } else {
-                        /* Ensure the memory is zeroed out (all internal
-                         * pointers are null) */
-                        memset(rxtid->rxbuf, 0, ATH_TID_MAX_BUFS *
-                                sizeof(struct ath_rxbuf));
-                        DPRINTF(sc, ATH_DBG_AGGR,
-                                "%s: Allocated @%p\n", __func__, rxtid->rxbuf);
-
-                        /* Allow aggregation reception */
-                        rxtid->addba_exchangecomplete = 1;
-                }
-        }
-        spin_unlock_bh(&rxtid->tidlock);
-
-        return 0;
-}
-
-/* Process DELBA */
-
-int ath_rx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
-{
-        struct ath_node *an = (struct ath_node *)sta->drv_priv;
-
-        ath_rx_aggr_teardown(sc, an, tid);
-        return 0;
-}
-
-/* Rx aggregation tear down */
-
-void ath_rx_aggr_teardown(struct ath_softc *sc, struct ath_node *an, u8 tid)
-{
-        struct ath_arx_tid *rxtid = &an->an_aggr.rx.tid[tid];
-
-        if (!rxtid->addba_exchangecomplete)
-                return;
-
-        del_timer_sync(&rxtid->timer);
-        ath_rx_flush_tid(sc, rxtid, 0);
-        rxtid->addba_exchangecomplete = 0;
-
-        /* De-allocate the receive buffer array allocated when addba started */
-
-        if (rxtid->rxbuf) {
-                DPRINTF(sc, ATH_DBG_AGGR,
-                        "%s: Deallocating TID %d rxbuff @%p\n",
-                        __func__, tid, rxtid->rxbuf);
-                kfree(rxtid->rxbuf);
-
-                /* Set pointer to null to avoid reuse*/
-                rxtid->rxbuf = NULL;
-        }
-}
-
-/* Initialize per-node receive state */
-
-void ath_rx_node_init(struct ath_softc *sc, struct ath_node *an)
-{
-        struct ath_arx_tid *rxtid;
-        int tidno;
-
-        /* Init per tid rx state */
-        for (tidno = 0, rxtid = &an->an_aggr.rx.tid[tidno];
-             tidno < WME_NUM_TID;
-             tidno++, rxtid++) {
-                rxtid->an = an;
-                rxtid->seq_reset = 1;
-                rxtid->seq_next = 0;
-                rxtid->baw_size = WME_MAX_BA;
-                rxtid->baw_head = rxtid->baw_tail = 0;
-
-                /*
-                 * Ensure the buffer pointer is null at this point
-                 * (needs to be allocated when addba is received)
-                 */
-
-                rxtid->rxbuf = NULL;
-                setup_timer(&rxtid->timer, ath_rx_timer,
-                            (unsigned long)rxtid);
-                spin_lock_init(&rxtid->tidlock);
-
-                /* ADDBA state */
-                rxtid->addba_exchangecomplete = 0;
-        }
-}
-
-void ath_rx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
-{
-        struct ath_arx_tid *rxtid;
-        int tidno, i;
-
-        /* Init per tid rx state */
-        for (tidno = 0, rxtid = &an->an_aggr.rx.tid[tidno];
-             tidno < WME_NUM_TID;
-             tidno++, rxtid++) {
-
-                if (!rxtid->addba_exchangecomplete)
-                        continue;
-
-                /* must cancel timer first */
-                del_timer_sync(&rxtid->timer);
-
-                /* drop any pending sub-frames */
-                ath_rx_flush_tid(sc, rxtid, 1);
-
-                for (i = 0; i < ATH_TID_MAX_BUFS; i++)
-                        ASSERT(rxtid->rxbuf[i].rx_wbuf == NULL);
-
-                rxtid->addba_exchangecomplete = 0;
-        }
-}