path: root/drivers/net/wireless/ath9k/recv.c
Diffstat (limited to 'drivers/net/wireless/ath9k/recv.c')
-rw-r--r--    drivers/net/wireless/ath9k/recv.c    501
1 file changed, 0 insertions, 501 deletions
diff --git a/drivers/net/wireless/ath9k/recv.c b/drivers/net/wireless/ath9k/recv.c
index 80f26b4c4e06..2ecb0a010ce2 100644
--- a/drivers/net/wireless/ath9k/recv.c
+++ b/drivers/net/wireless/ath9k/recv.c
@@ -64,328 +64,6 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
         ath9k_hw_rxena(ah);
 }
 
-/* Process received BAR frame */
-
-static int ath_bar_rx(struct ath_softc *sc,
-                      struct ath_node *an,
-                      struct sk_buff *skb)
-{
-        struct ieee80211_bar *bar;
-        struct ath_arx_tid *rxtid;
-        struct sk_buff *tskb;
-        struct ath_recv_status *rx_status;
-        int tidno, index, cindex;
-        u16 seqno;
-
-        /* look at BAR contents */
-
-        bar = (struct ieee80211_bar *)skb->data;
-        tidno = (le16_to_cpu(bar->control) & IEEE80211_BAR_CTL_TID_M)
-                >> IEEE80211_BAR_CTL_TID_S;
-        seqno = le16_to_cpu(bar->start_seq_num) >> IEEE80211_SEQ_SEQ_SHIFT;
-
-        /* process BAR - indicate all pending RX frames till the BAR seqno */
-
-        rxtid = &an->an_aggr.rx.tid[tidno];
-
-        spin_lock_bh(&rxtid->tidlock);
-
-        /* get relative index */
-
-        index = ATH_BA_INDEX(rxtid->seq_next, seqno);
-
-        /* drop BAR if old sequence (index is too large) */
-
-        if ((index > rxtid->baw_size) &&
-            (index > (IEEE80211_SEQ_MAX - (rxtid->baw_size << 2))))
-                /* discard frame, ieee layer may not treat frame as a dup */
-                goto unlock_and_free;
-
-        /* complete receive processing for all pending frames upto BAR seqno */
-
-        cindex = (rxtid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
-        while ((rxtid->baw_head != rxtid->baw_tail) &&
-               (rxtid->baw_head != cindex)) {
-                tskb = rxtid->rxbuf[rxtid->baw_head].rx_wbuf;
-                rx_status = &rxtid->rxbuf[rxtid->baw_head].rx_status;
-                rxtid->rxbuf[rxtid->baw_head].rx_wbuf = NULL;
-
-                if (tskb != NULL)
-                        ath_rx_subframe(an, tskb, rx_status);
-
-                INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
-                INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
-        }
-
-        /* ... and indicate rest of the frames in-order */
-
-        while (rxtid->baw_head != rxtid->baw_tail &&
-               rxtid->rxbuf[rxtid->baw_head].rx_wbuf != NULL) {
-                tskb = rxtid->rxbuf[rxtid->baw_head].rx_wbuf;
-                rx_status = &rxtid->rxbuf[rxtid->baw_head].rx_status;
-                rxtid->rxbuf[rxtid->baw_head].rx_wbuf = NULL;
-
-                ath_rx_subframe(an, tskb, rx_status);
-
-                INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
-                INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
-        }
-
-unlock_and_free:
-        spin_unlock_bh(&rxtid->tidlock);
-        /* free bar itself */
-        dev_kfree_skb(skb);
-        return IEEE80211_FTYPE_CTL;
-}
-
-/* Function to handle a subframe of aggregation when HT is enabled */
-
-static int ath_ampdu_input(struct ath_softc *sc,
-                           struct ath_node *an,
-                           struct sk_buff *skb,
-                           struct ath_recv_status *rx_status)
-{
-        struct ieee80211_hdr *hdr;
-        struct ath_arx_tid *rxtid;
-        struct ath_rxbuf *rxbuf;
-        u8 type, subtype;
-        u16 rxseq;
-        int tid = 0, index, cindex, rxdiff;
-        __le16 fc;
-        u8 *qc;
-
-        hdr = (struct ieee80211_hdr *)skb->data;
-        fc = hdr->frame_control;
-
-        /* collect stats of frames with non-zero version */
-
-        if ((le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_VERS) != 0) {
-                dev_kfree_skb(skb);
-                return -1;
-        }
-
-        type = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_FTYPE;
-        subtype = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_STYPE;
-
-        if (ieee80211_is_back_req(fc))
-                return ath_bar_rx(sc, an, skb);
-
-        /* special aggregate processing only for qos unicast data frames */
-
-        if (!ieee80211_is_data(fc) ||
-            !ieee80211_is_data_qos(fc) ||
-            is_multicast_ether_addr(hdr->addr1))
-                return ath_rx_subframe(an, skb, rx_status);
-
-        /* lookup rx tid state */
-
-        if (ieee80211_is_data_qos(fc)) {
-                qc = ieee80211_get_qos_ctl(hdr);
-                tid = qc[0] & 0xf;
-        }
-
-        if (sc->sc_ah->ah_opmode == ATH9K_M_STA) {
-                /* Drop the frame not belonging to me. */
-                if (memcmp(hdr->addr1, sc->sc_myaddr, ETH_ALEN)) {
-                        dev_kfree_skb(skb);
-                        return -1;
-                }
-        }
-
-        rxtid = &an->an_aggr.rx.tid[tid];
-
-        spin_lock(&rxtid->tidlock);
-
-        rxdiff = (rxtid->baw_tail - rxtid->baw_head) &
-                (ATH_TID_MAX_BUFS - 1);
-
-        /*
-         * If the ADDBA exchange has not been completed by the source,
-         * process via legacy path (i.e. no reordering buffer is needed)
-         */
-        if (!rxtid->addba_exchangecomplete) {
-                spin_unlock(&rxtid->tidlock);
-                return ath_rx_subframe(an, skb, rx_status);
-        }
-
-        /* extract sequence number from recvd frame */
-
-        rxseq = le16_to_cpu(hdr->seq_ctrl) >> IEEE80211_SEQ_SEQ_SHIFT;
-
-        if (rxtid->seq_reset) {
-                rxtid->seq_reset = 0;
-                rxtid->seq_next = rxseq;
-        }
-
-        index = ATH_BA_INDEX(rxtid->seq_next, rxseq);
-
-        /* drop frame if old sequence (index is too large) */
-
-        if (index > (IEEE80211_SEQ_MAX - (rxtid->baw_size << 2))) {
-                /* discard frame, ieee layer may not treat frame as a dup */
-                spin_unlock(&rxtid->tidlock);
-                dev_kfree_skb(skb);
-                return IEEE80211_FTYPE_DATA;
-        }
-
-        /* sequence number is beyond block-ack window */
-
-        if (index >= rxtid->baw_size) {
-
-                /* complete receive processing for all pending frames */
-
-                while (index >= rxtid->baw_size) {
-
-                        rxbuf = rxtid->rxbuf + rxtid->baw_head;
-
-                        if (rxbuf->rx_wbuf != NULL) {
-                                ath_rx_subframe(an, rxbuf->rx_wbuf,
-                                                &rxbuf->rx_status);
-                                rxbuf->rx_wbuf = NULL;
-                        }
-
-                        INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
-                        INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
-
-                        index--;
-                }
-        }
-
-        /* add buffer to the recv ba window */
-
-        cindex = (rxtid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
-        rxbuf = rxtid->rxbuf + cindex;
-
-        if (rxbuf->rx_wbuf != NULL) {
-                spin_unlock(&rxtid->tidlock);
-                /* duplicate frame */
-                dev_kfree_skb(skb);
-                return IEEE80211_FTYPE_DATA;
-        }
-
-        rxbuf->rx_wbuf = skb;
-        rxbuf->rx_time = get_timestamp();
-        rxbuf->rx_status = *rx_status;
-
-        /* advance tail if sequence received is newer
-         * than any received so far */
-
-        if (index >= rxdiff) {
-                rxtid->baw_tail = cindex;
-                INCR(rxtid->baw_tail, ATH_TID_MAX_BUFS);
-        }
-
-        /* indicate all in-order received frames */
-
-        while (rxtid->baw_head != rxtid->baw_tail) {
-                rxbuf = rxtid->rxbuf + rxtid->baw_head;
-                if (!rxbuf->rx_wbuf)
-                        break;
-
-                ath_rx_subframe(an, rxbuf->rx_wbuf, &rxbuf->rx_status);
-                rxbuf->rx_wbuf = NULL;
-
-                INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
-                INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
-        }
-
-        /*
-         * start a timer to flush all received frames if there are pending
-         * receive frames
-         */
-        if (rxtid->baw_head != rxtid->baw_tail)
-                mod_timer(&rxtid->timer, ATH_RX_TIMEOUT);
-        else
-                del_timer_sync(&rxtid->timer);
-
-        spin_unlock(&rxtid->tidlock);
-        return IEEE80211_FTYPE_DATA;
-}
-
-/* Timer to flush all received sub-frames */
-
-static void ath_rx_timer(unsigned long data)
-{
-        struct ath_arx_tid *rxtid = (struct ath_arx_tid *)data;
-        struct ath_node *an = rxtid->an;
-        struct ath_rxbuf *rxbuf;
-        int nosched;
-
-        spin_lock_bh(&rxtid->tidlock);
-        while (rxtid->baw_head != rxtid->baw_tail) {
-                rxbuf = rxtid->rxbuf + rxtid->baw_head;
-                if (!rxbuf->rx_wbuf) {
-                        INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
-                        INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
-                        continue;
-                }
-
-                /*
-                 * Stop if the next one is a very recent frame.
-                 *
-                 * Call get_timestamp in every iteration to protect against the
-                 * case in which a new frame is received while we are executing
-                 * this function. Using a timestamp obtained before entering
-                 * the loop could lead to a very large time interval
-                 * (a negative value typecast to unsigned), breaking the
-                 * function's logic.
-                 */
-                if ((get_timestamp() - rxbuf->rx_time) <
-                    (ATH_RX_TIMEOUT * HZ / 1000))
-                        break;
-
-                ath_rx_subframe(an, rxbuf->rx_wbuf,
-                                &rxbuf->rx_status);
-                rxbuf->rx_wbuf = NULL;
-
-                INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
-                INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
-        }
-
-        /*
-         * start a timer to flush all received frames if there are pending
-         * receive frames
-         */
-        if (rxtid->baw_head != rxtid->baw_tail)
-                nosched = 0;
-        else
-                nosched = 1; /* no need to re-arm the timer again */
-
-        spin_unlock_bh(&rxtid->tidlock);
-}
-
-/* Free all pending sub-frames in the re-ordering buffer */
-
-static void ath_rx_flush_tid(struct ath_softc *sc, struct ath_arx_tid *rxtid,
-                             int drop)
-{
-        struct ath_rxbuf *rxbuf;
-        unsigned long flag;
-
-        spin_lock_irqsave(&rxtid->tidlock, flag);
-        while (rxtid->baw_head != rxtid->baw_tail) {
-                rxbuf = rxtid->rxbuf + rxtid->baw_head;
-                if (!rxbuf->rx_wbuf) {
-                        INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
-                        INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
-                        continue;
-                }
-
-                if (drop)
-                        dev_kfree_skb(rxbuf->rx_wbuf);
-                else
-                        ath_rx_subframe(rxtid->an,
-                                        rxbuf->rx_wbuf,
-                                        &rxbuf->rx_status);
-
-                rxbuf->rx_wbuf = NULL;
-
-                INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
-                INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
-        }
-        spin_unlock_irqrestore(&rxtid->tidlock, flag);
-}
-
 static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc,
                                        u32 len)
 {
@@ -716,23 +394,6 @@ void ath_flushrecv(struct ath_softc *sc)
         spin_unlock_bh(&sc->sc_rxflushlock);
 }
 
-/* Process an individual frame */
-
-int ath_rx_input(struct ath_softc *sc,
-                 struct ath_node *an,
-                 struct sk_buff *skb,
-                 struct ath_recv_status *rx_status,
-                 enum ATH_RX_TYPE *status)
-{
-        if (sc->sc_flags & SC_OP_RXAGGR) {
-                *status = ATH_RX_CONSUMED;
-                return ath_ampdu_input(sc, an, skb, rx_status);
-        } else {
-                *status = ATH_RX_NON_CONSUMED;
-                return -1;
-        }
-}
-
 /* Process receive queue, as well as LED, etc. */
 
 int ath_rx_tasklet(struct ath_softc *sc, int flush)
@@ -1091,165 +752,3 @@ rx_next:
         return 0;
 #undef PA2DESC
 }
-
-/* Process ADDBA request in per-TID data structure */
-
-int ath_rx_aggr_start(struct ath_softc *sc,
-                      struct ieee80211_sta *sta,
-                      u16 tid,
-                      u16 *ssn)
-{
-        struct ath_arx_tid *rxtid;
-        struct ath_node *an;
-        struct ieee80211_hw *hw = sc->hw;
-        struct ieee80211_supported_band *sband;
-        u16 buffersize = 0;
-
-        an = (struct ath_node *)sta->drv_priv;
-        sband = hw->wiphy->bands[hw->conf.channel->band];
-        buffersize = IEEE80211_MIN_AMPDU_BUF <<
-                sband->ht_cap.ampdu_factor; /* FIXME */
-
-        rxtid = &an->an_aggr.rx.tid[tid];
-
-        spin_lock_bh(&rxtid->tidlock);
-        if (sc->sc_flags & SC_OP_RXAGGR) {
-                /* Allow aggregation reception
-                 * Adjust rx BA window size. Peer might indicate a
-                 * zero buffer size for a _dont_care_ condition.
-                 */
-                if (buffersize)
-                        rxtid->baw_size = min(buffersize, rxtid->baw_size);
-
-                /* set rx sequence number */
-                rxtid->seq_next = *ssn;
-
-                /* Allocate the receive buffers for this TID */
-                DPRINTF(sc, ATH_DBG_AGGR,
-                        "%s: Allcating rxbuffer for TID %d\n", __func__, tid);
-
-                if (rxtid->rxbuf == NULL) {
-                        /*
-                         * If the rxbuff is not NULL at this point, we *probably*
-                         * already allocated the buffer on a previous ADDBA,
-                         * and this is a subsequent ADDBA that got through.
-                         * Don't allocate, but use the value in the pointer,
-                         * we zero it out when we de-allocate.
-                         */
-                        rxtid->rxbuf = kmalloc(ATH_TID_MAX_BUFS *
-                                sizeof(struct ath_rxbuf), GFP_ATOMIC);
-                }
-                if (rxtid->rxbuf == NULL) {
-                        DPRINTF(sc, ATH_DBG_AGGR,
-                                "%s: Unable to allocate RX buffer, "
-                                "refusing ADDBA\n", __func__);
-                } else {
-                        /* Ensure the memory is zeroed out (all internal
-                         * pointers are null) */
-                        memset(rxtid->rxbuf, 0, ATH_TID_MAX_BUFS *
-                                sizeof(struct ath_rxbuf));
-                        DPRINTF(sc, ATH_DBG_AGGR,
-                                "%s: Allocated @%p\n", __func__, rxtid->rxbuf);
-
-                        /* Allow aggregation reception */
-                        rxtid->addba_exchangecomplete = 1;
-                }
-        }
-        spin_unlock_bh(&rxtid->tidlock);
-
-        return 0;
-}
-
-/* Process DELBA */
-
-int ath_rx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
-{
-        struct ath_node *an = (struct ath_node *)sta->drv_priv;
-
-        ath_rx_aggr_teardown(sc, an, tid);
-        return 0;
-}
-
-/* Rx aggregation tear down */
-
-void ath_rx_aggr_teardown(struct ath_softc *sc, struct ath_node *an, u8 tid)
-{
-        struct ath_arx_tid *rxtid = &an->an_aggr.rx.tid[tid];
-
-        if (!rxtid->addba_exchangecomplete)
-                return;
-
-        del_timer_sync(&rxtid->timer);
-        ath_rx_flush_tid(sc, rxtid, 0);
-        rxtid->addba_exchangecomplete = 0;
-
-        /* De-allocate the receive buffer array allocated when addba started */
-
-        if (rxtid->rxbuf) {
-                DPRINTF(sc, ATH_DBG_AGGR,
-                        "%s: Deallocating TID %d rxbuff @%p\n",
-                        __func__, tid, rxtid->rxbuf);
-                kfree(rxtid->rxbuf);
-
-                /* Set pointer to null to avoid reuse*/
-                rxtid->rxbuf = NULL;
-        }
-}
-
-/* Initialize per-node receive state */
-
-void ath_rx_node_init(struct ath_softc *sc, struct ath_node *an)
-{
-        struct ath_arx_tid *rxtid;
-        int tidno;
-
-        /* Init per tid rx state */
-        for (tidno = 0, rxtid = &an->an_aggr.rx.tid[tidno];
-             tidno < WME_NUM_TID;
-             tidno++, rxtid++) {
-                rxtid->an = an;
-                rxtid->seq_reset = 1;
-                rxtid->seq_next = 0;
-                rxtid->baw_size = WME_MAX_BA;
-                rxtid->baw_head = rxtid->baw_tail = 0;
-
-                /*
-                 * Ensure the buffer pointer is null at this point
-                 * (needs to be allocated when addba is received)
-                 */
-
-                rxtid->rxbuf = NULL;
-                setup_timer(&rxtid->timer, ath_rx_timer,
-                        (unsigned long)rxtid);
-                spin_lock_init(&rxtid->tidlock);
-
-                /* ADDBA state */
-                rxtid->addba_exchangecomplete = 0;
-        }
-}
-
-void ath_rx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
-{
-        struct ath_arx_tid *rxtid;
-        int tidno, i;
-
-        /* Init per tid rx state */
-        for (tidno = 0, rxtid = &an->an_aggr.rx.tid[tidno];
-             tidno < WME_NUM_TID;
-             tidno++, rxtid++) {
-
-                if (!rxtid->addba_exchangecomplete)
-                        continue;
-
-                /* must cancel timer first */
-                del_timer_sync(&rxtid->timer);
-
-                /* drop any pending sub-frames */
-                ath_rx_flush_tid(sc, rxtid, 1);
-
-                for (i = 0; i < ATH_TID_MAX_BUFS; i++)
-                        ASSERT(rxtid->rxbuf[i].rx_wbuf == NULL);
-
-                rxtid->addba_exchangecomplete = 0;
-        }
-}
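
For readers following the reordering logic this commit removes: ath_ampdu_input() maps each subframe's 802.11 sequence number to an offset from rxtid->seq_next, stores it at that slot in the per-TID rxbuf ring, and releases slots in order from baw_head; ath_bar_rx() and ath_rx_timer() flush slots the same way. The minimal standalone sketch below illustrates only that index arithmetic; it is not driver code, and the constants SEQ_MAX, TID_MAX_BUFS and BAW_SIZE plus the helper ba_index() are illustrative stand-ins for the driver's IEEE80211_SEQ_MAX, ATH_TID_MAX_BUFS, the negotiated window size and the ATH_BA_INDEX() macro.

#include <stdio.h>

#define SEQ_MAX       4096   /* 12-bit 802.11 sequence space (illustrative) */
#define TID_MAX_BUFS  128    /* reorder ring size, power of two (illustrative) */
#define BAW_SIZE      64     /* block-ack window size (illustrative) */

/* Offset of seq ahead of the window start, modulo the sequence space
 * (same idea as the driver's ATH_BA_INDEX macro). */
static int ba_index(int seq_next, int seq)
{
        return (seq - seq_next) & (SEQ_MAX - 1);
}

int main(void)
{
        int seq_next = 4090;   /* window start: oldest not-yet-released seq */
        int baw_head = 0;      /* ring slot that currently holds seq_next */
        int seqs[] = { 4090, 4093, 2, 4091 };   /* out-of-order arrivals across the 4095 -> 0 wrap */
        unsigned int i;

        for (i = 0; i < sizeof(seqs) / sizeof(seqs[0]); i++) {
                int index = ba_index(seq_next, seqs[i]);
                int cindex = (baw_head + index) & (TID_MAX_BUFS - 1);

                if (index > SEQ_MAX - (BAW_SIZE << 2)) {
                        /* a very large offset means a stale sequence: drop it */
                        printf("seq %4d: stale, dropped\n", seqs[i]);
                        continue;
                }
                printf("seq %4d: index %3d -> ring slot %3d%s\n",
                       seqs[i], index, cindex,
                       index >= BAW_SIZE ? " (beyond window: slide window first)" : "");
        }
        return 0;
}

With these numbers, sequence 4090 maps to index 0, 4093 to 3, 2 (after the wrap) to 8 and 4091 to 1, so the wrap-around in the 12-bit sequence space does not disturb slot placement in the ring.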