author     Ron Rindjunsky <ron.rindjunsky@intel.com>  2008-03-26 14:36:03 -0400
committer  John W. Linville <linville@tuxdriver.com>  2008-03-27 16:03:20 -0400
commit     cee24a3e580f1062c8bb8b1692b95014d882bc7d (patch)
tree       e7df5e0f987a817ff0c9b14142a4efc583d39fa2
parent     6c507cd0400cb51dd2ee251c1b8756b9375a1128 (diff)
mac80211: A-MPDU MLME use dynamic allocation
This patch alters the A-MPDU MLME in sta_info to use dynamic allocation, thus drastically improving memory usage - from a constant ~2 Kbyte in the previous (static) allocation to a lower limit of ~200 Byte and an upper limit of ~2 Kbyte.

Signed-off-by: Ron Rindjunsky <ron.rindjunsky@intel.com>
Signed-off-by: Tomas Winkler <tomas.winkler@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965-rs.c    2
-rw-r--r--  net/mac80211/debugfs_sta.c                   17
-rw-r--r--  net/mac80211/ieee80211.c                     64
-rw-r--r--  net/mac80211/ieee80211_sta.c                 84
-rw-r--r--  net/mac80211/rx.c                            10
-rw-r--r--  net/mac80211/sta_info.c                      30
-rw-r--r--  net/mac80211/sta_info.h                      44
7 files changed, 151 insertions, 100 deletions
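
In short, struct sta_ampdu_mlme in sta_info.h no longer embeds a full struct tid_ampdu_rx and struct tid_ampdu_tx for every TID; it keeps only a per-TID state byte and a pointer, and the per-TID structs are kmalloc'ed when a block-ack session is actually set up and kfree'd when it is torn down. A rough sketch of the resulting layout, condensed from the sta_info.h hunk at the end of this patch (the comments here are annotations, not part of the patch):

    struct sta_ampdu_mlme {
            /* rx */
            u8 tid_state_rx[STA_TID_NUM];              /* always present, one byte per TID */
            struct tid_ampdu_rx *tid_rx[STA_TID_NUM];  /* allocated only while an Rx BA session exists */
            spinlock_t ampdu_rx;
            /* tx */
            u8 tid_state_tx[STA_TID_NUM];
            struct tid_ampdu_tx *tid_tx[STA_TID_NUM];  /* allocated only while a Tx BA session exists */
            u8 addba_req_num[STA_TID_NUM];
            spinlock_t ampdu_tx;
            u8 dialog_token_allocator;
    };

With no active sessions a station now carries only the state bytes, pointers, addba_req_num counters and locks (roughly the ~200 Byte lower bound quoted above); with sessions open on every TID the kmalloc'ed tid_ampdu_rx/tid_ampdu_tx structs bring it back up to the ~2 Kbyte upper bound.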
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-rs.c b/drivers/net/wireless/iwlwifi/iwl-4965-rs.c
index 7d7ce7489ab5..735eadd57d18 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-rs.c
@@ -397,7 +397,7 @@ static void rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
 	DECLARE_MAC_BUF(mac);
 
 	spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);
-	state = sta->ampdu_mlme.tid_tx[tid].state;
+	state = sta->ampdu_mlme.tid_state_tx[tid];
 	spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
 
 	if (state == HT_AGG_STATE_IDLE &&
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 62354de0199f..256ea880d28b 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -169,27 +169,30 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
 	p += scnprintf(p, sizeof(buf)+buf-p, "\n RX :");
 	for (i = 0; i < STA_TID_NUM; i++)
 		p += scnprintf(p, sizeof(buf)+buf-p, "%5d",
-				sta->ampdu_mlme.tid_rx[i].state);
+				sta->ampdu_mlme.tid_state_rx[i]);
 
 	p += scnprintf(p, sizeof(buf)+buf-p, "\n DTKN:");
 	for (i = 0; i < STA_TID_NUM; i++)
 		p += scnprintf(p, sizeof(buf)+buf-p, "%5d",
-				sta->ampdu_mlme.tid_rx[i].dialog_token);
+				sta->ampdu_mlme.tid_state_rx[i]?
+				sta->ampdu_mlme.tid_rx[i]->dialog_token : 0);
 
 	p += scnprintf(p, sizeof(buf)+buf-p, "\n TX :");
 	for (i = 0; i < STA_TID_NUM; i++)
 		p += scnprintf(p, sizeof(buf)+buf-p, "%5d",
-				sta->ampdu_mlme.tid_tx[i].state);
+				sta->ampdu_mlme.tid_state_tx[i]);
 
 	p += scnprintf(p, sizeof(buf)+buf-p, "\n DTKN:");
 	for (i = 0; i < STA_TID_NUM; i++)
 		p += scnprintf(p, sizeof(buf)+buf-p, "%5d",
-				sta->ampdu_mlme.tid_tx[i].dialog_token);
+				sta->ampdu_mlme.tid_state_tx[i]?
+				sta->ampdu_mlme.tid_tx[i]->dialog_token : 0);
 
 	p += scnprintf(p, sizeof(buf)+buf-p, "\n SSN :");
 	for (i = 0; i < STA_TID_NUM; i++)
 		p += scnprintf(p, sizeof(buf)+buf-p, "%5d",
-				sta->ampdu_mlme.tid_tx[i].ssn);
+				sta->ampdu_mlme.tid_state_tx[i]?
+				sta->ampdu_mlme.tid_tx[i]->ssn : 0);
 
 	p += scnprintf(p, sizeof(buf)+buf-p, "\n");
 
@@ -230,12 +233,12 @@ static ssize_t sta_agg_status_write(struct file *file,
 			strcpy(state, "off ");
 			ieee80211_sta_stop_rx_ba_session(dev, da, tid_num, 0,
 					WLAN_REASON_QSTA_REQUIRE_SETUP);
-			sta->ampdu_mlme.tid_rx[tid_num].state |=
+			sta->ampdu_mlme.tid_state_rx[tid_num] |=
 					HT_AGG_STATE_DEBUGFS_CTL;
 			tid_static_rx[tid_num] = 0;
 		} else {
 			strcpy(state, "on ");
-			sta->ampdu_mlme.tid_rx[tid_num].state &=
+			sta->ampdu_mlme.tid_state_rx[tid_num] &=
 					~HT_AGG_STATE_DEBUGFS_CTL;
 			tid_static_rx[tid_num] = 1;
 		}
diff --git a/net/mac80211/ieee80211.c b/net/mac80211/ieee80211.c
index 616ce10d2a38..8c0f782d21e3 100644
--- a/net/mac80211/ieee80211.c
+++ b/net/mac80211/ieee80211.c
@@ -569,12 +569,12 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
 	spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);
 
 	/* we have tried too many times, receiver does not want A-MPDU */
-	if (sta->ampdu_mlme.tid_tx[tid].addba_req_num > HT_AGG_MAX_RETRIES) {
+	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
 		ret = -EBUSY;
 		goto start_ba_exit;
 	}
 
-	state = &sta->ampdu_mlme.tid_tx[tid].state;
+	state = &sta->ampdu_mlme.tid_state_tx[tid];
 	/* check if the TID is not in aggregation flow already */
 	if (*state != HT_AGG_STATE_IDLE) {
 #ifdef CONFIG_MAC80211_HT_DEBUG
@@ -585,6 +585,23 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
 		goto start_ba_exit;
 	}
 
+	/* prepare A-MPDU MLME for Tx aggregation */
+	sta->ampdu_mlme.tid_tx[tid] =
+			kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
+	if (!sta->ampdu_mlme.tid_tx[tid]) {
+		if (net_ratelimit())
+			printk(KERN_ERR "allocate tx mlme to tid %d failed\n",
+					tid);
+		ret = -ENOMEM;
+		goto start_ba_exit;
+	}
+	/* Tx timer */
+	sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function =
+			sta_addba_resp_timer_expired;
+	sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.data =
+			(unsigned long)&sta->timer_to_tid[tid];
+	init_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
+
 	/* ensure that TX flow won't interrupt us
 	 * until the end of the call to requeue function */
 	spin_lock_bh(&local->mdev->queue_lock);
@@ -596,11 +613,10 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
 	 * don't switch to aggregation */
 	if (ret) {
 #ifdef CONFIG_MAC80211_HT_DEBUG
-		printk(KERN_DEBUG "BA request denied - no queue available for"
+		printk(KERN_DEBUG "BA request denied - queue unavailable for"
 					" tid %d\n", tid);
 #endif /* CONFIG_MAC80211_HT_DEBUG */
-		spin_unlock_bh(&local->mdev->queue_lock);
-		goto start_ba_exit;
+		goto start_ba_err;
 	}
 	sdata = sta->sdata;
 
@@ -618,38 +634,40 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
 		 * allocated queue */
 		ieee80211_ht_agg_queue_remove(local, sta, tid, 0);
 #ifdef CONFIG_MAC80211_HT_DEBUG
-		printk(KERN_DEBUG "BA request denied - HW or queue unavailable"
-					" for tid %d\n", tid);
+		printk(KERN_DEBUG "BA request denied - HW unavailable for"
+					" tid %d\n", tid);
 #endif /* CONFIG_MAC80211_HT_DEBUG */
-		spin_unlock_bh(&local->mdev->queue_lock);
 		*state = HT_AGG_STATE_IDLE;
-		goto start_ba_exit;
+		goto start_ba_err;
 	}
 
 	/* Will put all the packets in the new SW queue */
 	ieee80211_requeue(local, ieee802_1d_to_ac[tid]);
 	spin_unlock_bh(&local->mdev->queue_lock);
 
-	/* We have most probably almost emptied the legacy queue */
-	/* ieee80211_wake_queue(local_to_hw(local), ieee802_1d_to_ac[tid]); */
-
 	/* send an addBA request */
 	sta->ampdu_mlme.dialog_token_allocator++;
-	sta->ampdu_mlme.tid_tx[tid].dialog_token =
+	sta->ampdu_mlme.tid_tx[tid]->dialog_token =
 			sta->ampdu_mlme.dialog_token_allocator;
-	sta->ampdu_mlme.tid_tx[tid].ssn = start_seq_num;
+	sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num;
 
 	ieee80211_send_addba_request(sta->sdata->dev, ra, tid,
-			sta->ampdu_mlme.tid_tx[tid].dialog_token,
-			sta->ampdu_mlme.tid_tx[tid].ssn,
+			sta->ampdu_mlme.tid_tx[tid]->dialog_token,
+			sta->ampdu_mlme.tid_tx[tid]->ssn,
 			0x40, 5000);
 
 	/* activate the timer for the recipient's addBA response */
-	sta->ampdu_mlme.tid_tx[tid].addba_resp_timer.expires =
+	sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires =
 			jiffies + ADDBA_RESP_INTERVAL;
-	add_timer(&sta->ampdu_mlme.tid_tx[tid].addba_resp_timer);
+	add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
 	printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
+	goto start_ba_exit;
 
+start_ba_err:
+	kfree(sta->ampdu_mlme.tid_tx[tid]);
+	sta->ampdu_mlme.tid_tx[tid] = NULL;
+	spin_unlock_bh(&local->mdev->queue_lock);
+	ret = -EBUSY;
 start_ba_exit:
 	spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
 	rcu_read_unlock();
@@ -683,7 +701,7 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_hw *hw,
 	}
 
 	/* check if the TID is in aggregation */
-	state = &sta->ampdu_mlme.tid_tx[tid].state;
+	state = &sta->ampdu_mlme.tid_state_tx[tid];
 	spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);
 
 	if (*state != HT_AGG_STATE_OPERATIONAL) {
@@ -741,7 +759,7 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
 		return;
 	}
 
-	state = &sta->ampdu_mlme.tid_tx[tid].state;
+	state = &sta->ampdu_mlme.tid_state_tx[tid];
 	spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);
 
 	if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
@@ -790,7 +808,7 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
 		rcu_read_unlock();
 		return;
 	}
-	state = &sta->ampdu_mlme.tid_tx[tid].state;
+	state = &sta->ampdu_mlme.tid_state_tx[tid];
 
 	spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);
 	if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) {
@@ -819,7 +837,9 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
 	 * necessarily stopped */
 	netif_schedule(local->mdev);
 	*state = HT_AGG_STATE_IDLE;
-	sta->ampdu_mlme.tid_tx[tid].addba_req_num = 0;
+	sta->ampdu_mlme.addba_req_num[tid] = 0;
+	kfree(sta->ampdu_mlme.tid_tx[tid]);
+	sta->ampdu_mlme.tid_tx[tid] = NULL;
 	spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
 
 	rcu_read_unlock();
diff --git a/net/mac80211/ieee80211_sta.c b/net/mac80211/ieee80211_sta.c
index cf51ca6804dd..f9cf2f187893 100644
--- a/net/mac80211/ieee80211_sta.c
+++ b/net/mac80211/ieee80211_sta.c
@@ -1216,12 +1216,11 @@ static void ieee80211_sta_process_addba_request(struct net_device *dev,
 		buf_size = buf_size << sband->ht_info.ampdu_factor;
 	}
 
-	tid_agg_rx = &sta->ampdu_mlme.tid_rx[tid];
 
 	/* examine state machine */
 	spin_lock_bh(&sta->ampdu_mlme.ampdu_rx);
 
-	if (tid_agg_rx->state != HT_AGG_STATE_IDLE) {
+	if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_IDLE) {
 #ifdef CONFIG_MAC80211_HT_DEBUG
 		if (net_ratelimit())
 			printk(KERN_DEBUG "unexpected AddBA Req from "
@@ -1231,6 +1230,24 @@ static void ieee80211_sta_process_addba_request(struct net_device *dev,
 		goto end;
 	}
 
+	/* prepare A-MPDU MLME for Rx aggregation */
+	sta->ampdu_mlme.tid_rx[tid] =
+			kmalloc(sizeof(struct tid_ampdu_rx), GFP_ATOMIC);
+	if (!sta->ampdu_mlme.tid_rx[tid]) {
+		if (net_ratelimit())
+			printk(KERN_ERR "allocate rx mlme to tid %d failed\n",
+					tid);
+		goto end;
+	}
+	/* rx timer */
+	sta->ampdu_mlme.tid_rx[tid]->session_timer.function =
+			sta_rx_agg_session_timer_expired;
+	sta->ampdu_mlme.tid_rx[tid]->session_timer.data =
+			(unsigned long)&sta->timer_to_tid[tid];
+	init_timer(&sta->ampdu_mlme.tid_rx[tid]->session_timer);
+
+	tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
+
 	/* prepare reordering buffer */
 	tid_agg_rx->reorder_buf =
 		kmalloc(buf_size * sizeof(struct sk_buf *), GFP_ATOMIC);
@@ -1238,6 +1255,7 @@ static void ieee80211_sta_process_addba_request(struct net_device *dev,
 		if (net_ratelimit())
 			printk(KERN_ERR "can not allocate reordering buffer "
 			       "to tid %d\n", tid);
+		kfree(sta->ampdu_mlme.tid_rx[tid]);
 		goto end;
 	}
 	memset(tid_agg_rx->reorder_buf, 0,
@@ -1252,11 +1270,13 @@ static void ieee80211_sta_process_addba_request(struct net_device *dev,
 
 	if (ret) {
 		kfree(tid_agg_rx->reorder_buf);
+		kfree(tid_agg_rx);
+		sta->ampdu_mlme.tid_rx[tid] = NULL;
 		goto end;
 	}
 
 	/* change state and send addba resp */
-	tid_agg_rx->state = HT_AGG_STATE_OPERATIONAL;
+	sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_OPERATIONAL;
 	tid_agg_rx->dialog_token = dialog_token;
 	tid_agg_rx->ssn = start_seq_num;
 	tid_agg_rx->head_seq_num = start_seq_num;
@@ -1295,39 +1315,37 @@ static void ieee80211_sta_process_addba_resp(struct net_device *dev,
 	capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
 	tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
 
-	state = &sta->ampdu_mlme.tid_tx[tid].state;
+	state = &sta->ampdu_mlme.tid_state_tx[tid];
 
 	spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);
 
+	if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
+		spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
+		printk(KERN_DEBUG "state not HT_ADDBA_REQUESTED_MSK:"
+			"%d\n", *state);
+		goto addba_resp_exit;
+	}
+
 	if (mgmt->u.action.u.addba_resp.dialog_token !=
-		sta->ampdu_mlme.tid_tx[tid].dialog_token) {
+		sta->ampdu_mlme.tid_tx[tid]->dialog_token) {
 		spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
 #ifdef CONFIG_MAC80211_HT_DEBUG
 		printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid);
 #endif /* CONFIG_MAC80211_HT_DEBUG */
-		rcu_read_unlock();
-		return;
+		goto addba_resp_exit;
 	}
 
-	del_timer_sync(&sta->ampdu_mlme.tid_tx[tid].addba_resp_timer);
+	del_timer_sync(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
 #ifdef CONFIG_MAC80211_HT_DEBUG
 	printk(KERN_DEBUG "switched off addBA timer for tid %d \n", tid);
 #endif /* CONFIG_MAC80211_HT_DEBUG */
 	if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
 			== WLAN_STATUS_SUCCESS) {
-		if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
-			spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
-			printk(KERN_DEBUG "state not HT_ADDBA_REQUESTED_MSK:"
-					"%d\n", *state);
-			rcu_read_unlock();
-			return;
-		}
-
 		if (*state & HT_ADDBA_RECEIVED_MSK)
 			printk(KERN_DEBUG "double addBA response\n");
 
 		*state |= HT_ADDBA_RECEIVED_MSK;
-		sta->ampdu_mlme.tid_tx[tid].addba_req_num = 0;
+		sta->ampdu_mlme.addba_req_num[tid] = 0;
 
 		if (*state == HT_AGG_STATE_OPERATIONAL) {
 			printk(KERN_DEBUG "Aggregation on for tid %d \n", tid);
@@ -1339,13 +1357,15 @@ static void ieee80211_sta_process_addba_resp(struct net_device *dev,
 	} else {
 		printk(KERN_DEBUG "recipient rejected agg: tid %d \n", tid);
 
-		sta->ampdu_mlme.tid_tx[tid].addba_req_num++;
+		sta->ampdu_mlme.addba_req_num[tid]++;
 		/* this will allow the state check in stop_BA_session */
 		*state = HT_AGG_STATE_OPERATIONAL;
 		spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
 		ieee80211_stop_tx_ba_session(hw, sta->addr, tid,
 					     WLAN_BACK_INITIATOR);
 	}
+
+addba_resp_exit:
 	rcu_read_unlock();
 }
 
@@ -1411,13 +1431,13 @@ void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *ra, u16 tid,
 
 	/* check if TID is in operational state */
 	spin_lock_bh(&sta->ampdu_mlme.ampdu_rx);
-	if (sta->ampdu_mlme.tid_rx[tid].state
+	if (sta->ampdu_mlme.tid_state_rx[tid]
 				!= HT_AGG_STATE_OPERATIONAL) {
 		spin_unlock_bh(&sta->ampdu_mlme.ampdu_rx);
 		rcu_read_unlock();
 		return;
 	}
-	sta->ampdu_mlme.tid_rx[tid].state =
+	sta->ampdu_mlme.tid_state_rx[tid] =
 		HT_AGG_STATE_REQ_STOP_BA_MSK |
 		(initiator << HT_AGG_STATE_INITIATOR_SHIFT);
 	spin_unlock_bh(&sta->ampdu_mlme.ampdu_rx);
@@ -1434,25 +1454,27 @@ void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *ra, u16 tid,
 
 	/* shutdown timer has not expired */
 	if (initiator != WLAN_BACK_TIMER)
-		del_timer_sync(&sta->ampdu_mlme.tid_rx[tid].
-				session_timer);
+		del_timer_sync(&sta->ampdu_mlme.tid_rx[tid]->session_timer);
 
 	/* check if this is a self generated aggregation halt */
 	if (initiator == WLAN_BACK_RECIPIENT || initiator == WLAN_BACK_TIMER)
 		ieee80211_send_delba(dev, ra, tid, 0, reason);
 
 	/* free the reordering buffer */
-	for (i = 0; i < sta->ampdu_mlme.tid_rx[tid].buf_size; i++) {
-		if (sta->ampdu_mlme.tid_rx[tid].reorder_buf[i]) {
+	for (i = 0; i < sta->ampdu_mlme.tid_rx[tid]->buf_size; i++) {
+		if (sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]) {
 			/* release the reordered frames */
-			dev_kfree_skb(sta->ampdu_mlme.tid_rx[tid].reorder_buf[i]);
-			sta->ampdu_mlme.tid_rx[tid].stored_mpdu_num--;
-			sta->ampdu_mlme.tid_rx[tid].reorder_buf[i] = NULL;
+			dev_kfree_skb(sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]);
+			sta->ampdu_mlme.tid_rx[tid]->stored_mpdu_num--;
+			sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i] = NULL;
 		}
 	}
-	kfree(sta->ampdu_mlme.tid_rx[tid].reorder_buf);
+	/* free resources */
+	kfree(sta->ampdu_mlme.tid_rx[tid]->reorder_buf);
+	kfree(sta->ampdu_mlme.tid_rx[tid]);
+	sta->ampdu_mlme.tid_rx[tid] = NULL;
+	sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_IDLE;
 
-	sta->ampdu_mlme.tid_rx[tid].state = HT_AGG_STATE_IDLE;
 	rcu_read_unlock();
 }
 
@@ -1491,7 +1513,7 @@ static void ieee80211_sta_process_delba(struct net_device *dev,
 					WLAN_BACK_INITIATOR, 0);
 	else { /* WLAN_BACK_RECIPIENT */
 		spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);
-		sta->ampdu_mlme.tid_tx[tid].state =
+		sta->ampdu_mlme.tid_state_tx[tid] =
 			HT_AGG_STATE_OPERATIONAL;
 		spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
 		ieee80211_stop_tx_ba_session(&local->hw, sta->addr, tid,
@@ -1528,7 +1550,7 @@ void sta_addba_resp_timer_expired(unsigned long data)
 		return;
 	}
 
-	state = &sta->ampdu_mlme.tid_tx[tid].state;
+	state = &sta->ampdu_mlme.tid_state_tx[tid];
 	/* check if the TID waits for addBA response */
 	spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);
 	if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 644d2774469d..d9c6ed5be4fc 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1514,9 +1514,10 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
 	if (!rx->sta)
 		return RX_CONTINUE;
 	tid = le16_to_cpu(bar->control) >> 12;
-	tid_agg_rx = &(rx->sta->ampdu_mlme.tid_rx[tid]);
-	if (tid_agg_rx->state != HT_AGG_STATE_OPERATIONAL)
+	if (rx->sta->ampdu_mlme.tid_state_rx[tid]
+				!= HT_AGG_STATE_OPERATIONAL)
 		return RX_CONTINUE;
+	tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid];
 
 	start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4;
 
@@ -2123,11 +2124,12 @@ static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
 
 	qc = skb->data + ieee80211_get_hdrlen(fc) - QOS_CONTROL_LEN;
 	tid = qc[0] & QOS_CONTROL_TID_MASK;
-	tid_agg_rx = &(sta->ampdu_mlme.tid_rx[tid]);
 
-	if (tid_agg_rx->state != HT_AGG_STATE_OPERATIONAL)
+	if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL)
 		goto end_reorder;
 
+	tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
+
 	/* null data frames are excluded */
 	if (unlikely(fc & IEEE80211_STYPE_NULLFUNC))
 		goto end_reorder;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 3b84c16cf054..f708367092d1 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -170,9 +170,16 @@ void sta_info_destroy(struct sta_info *sta)
 		dev_kfree_skb_any(skb);
 
 	for (i = 0; i < STA_TID_NUM; i++) {
-		del_timer_sync(&sta->ampdu_mlme.tid_rx[i].session_timer);
-		del_timer_sync(&sta->ampdu_mlme.tid_tx[i].addba_resp_timer);
+		spin_lock_bh(&sta->ampdu_mlme.ampdu_rx);
+		if (sta->ampdu_mlme.tid_rx[i])
+			del_timer_sync(&sta->ampdu_mlme.tid_rx[i]->session_timer);
+		spin_unlock_bh(&sta->ampdu_mlme.ampdu_rx);
+		spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);
+		if (sta->ampdu_mlme.tid_tx[i])
+			del_timer_sync(&sta->ampdu_mlme.tid_tx[i]->addba_resp_timer);
+		spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
 	}
+
 	rate_control_free_sta(sta->rate_ctrl, sta->rate_ctrl_priv);
 	rate_control_put(sta->rate_ctrl);
 
@@ -227,18 +234,13 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
 		sta->timer_to_tid[i] = i;
 		/* tid to tx queue: initialize according to HW (0 is valid) */
 		sta->tid_to_tx_q[i] = local->hw.queues;
-		/* rx timers */
-		sta->ampdu_mlme.tid_rx[i].session_timer.function =
-			sta_rx_agg_session_timer_expired;
-		sta->ampdu_mlme.tid_rx[i].session_timer.data =
-			(unsigned long)&sta->timer_to_tid[i];
-		init_timer(&sta->ampdu_mlme.tid_rx[i].session_timer);
-		/* tx timers */
-		sta->ampdu_mlme.tid_tx[i].addba_resp_timer.function =
-			sta_addba_resp_timer_expired;
-		sta->ampdu_mlme.tid_tx[i].addba_resp_timer.data =
-			(unsigned long)&sta->timer_to_tid[i];
-		init_timer(&sta->ampdu_mlme.tid_tx[i].addba_resp_timer);
+		/* rx */
+		sta->ampdu_mlme.tid_state_rx[i] = HT_AGG_STATE_IDLE;
+		sta->ampdu_mlme.tid_rx[i] = NULL;
+		/* tx */
+		sta->ampdu_mlme.tid_state_tx[i] = HT_AGG_STATE_IDLE;
+		sta->ampdu_mlme.tid_tx[i] = NULL;
+		sta->ampdu_mlme.addba_req_num[i] = 0;
 	}
 	skb_queue_head_init(&sta->ps_tx_buf);
 	skb_queue_head_init(&sta->tx_filtered);
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 5d6b43652e18..af5a791e21f3 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -68,43 +68,37 @@ enum ieee80211_sta_info_flags {
 /**
  * struct tid_ampdu_tx - TID aggregation information (Tx).
  *
- * @state: TID's state in session state machine.
- * @dialog_token: dialog token for aggregation session
- * @ssn: Starting Sequence Number expected to be aggregated.
  * @addba_resp_timer: timer for peer's response to addba request
- * @addba_req_num: number of times addBA request has been sent.
+ * @ssn: Starting Sequence Number expected to be aggregated.
+ * @dialog_token: dialog token for aggregation session
  */
 struct tid_ampdu_tx {
-	u8 state;
-	u8 dialog_token;
-	u16 ssn;
 	struct timer_list addba_resp_timer;
-	u8 addba_req_num;
+	u16 ssn;
+	u8 dialog_token;
 };
 
 /**
  * struct tid_ampdu_rx - TID aggregation information (Rx).
  *
- * @state: TID's state in session state machine.
- * @dialog_token: dialog token for aggregation session
- * @ssn: Starting Sequence Number expected to be aggregated.
- * @buf_size: buffer size for incoming A-MPDUs
- * @timeout: reset timer value.
  * @head_seq_num: head sequence number in reordering buffer.
  * @stored_mpdu_num: number of MPDUs in reordering buffer
  * @reorder_buf: buffer to reorder incoming aggregated MPDUs
  * @session_timer: check if peer keeps Tx-ing on the TID (by timeout value)
+ * @ssn: Starting Sequence Number expected to be aggregated.
+ * @buf_size: buffer size for incoming A-MPDUs
+ * @timeout: reset timer value.
+ * @dialog_token: dialog token for aggregation session
  */
 struct tid_ampdu_rx {
-	u8 state;
-	u8 dialog_token;
-	u16 ssn;
-	u16 buf_size;
-	u16 timeout;
 	u16 head_seq_num;
 	u16 stored_mpdu_num;
 	struct sk_buff **reorder_buf;
 	struct timer_list session_timer;
+	u16 ssn;
+	u16 buf_size;
+	u16 timeout;
+	u8 dialog_token;
 };
 
 /**
@@ -133,16 +127,24 @@ enum plink_state {
 /**
  * struct sta_ampdu_mlme - STA aggregation information.
  *
+ * @tid_state_rx: TID's state in Rx session state machine.
  * @tid_rx: aggregation info for Rx per TID
- * @tid_tx: aggregation info for Tx per TID
  * @ampdu_rx: for locking sections in aggregation Rx flow
+ * @tid_state_tx: TID's state in Tx session state machine.
+ * @tid_tx: aggregation info for Tx per TID
+ * @addba_req_num: number of times addBA request has been sent.
  * @ampdu_tx: for locking sectionsi in aggregation Tx flow
  * @dialog_token_allocator: dialog token enumerator for each new session;
  */
 struct sta_ampdu_mlme {
-	struct tid_ampdu_rx tid_rx[STA_TID_NUM];
-	struct tid_ampdu_tx tid_tx[STA_TID_NUM];
+	/* rx */
+	u8 tid_state_rx[STA_TID_NUM];
+	struct tid_ampdu_rx *tid_rx[STA_TID_NUM];
 	spinlock_t ampdu_rx;
+	/* tx */
+	u8 tid_state_tx[STA_TID_NUM];
+	struct tid_ampdu_tx *tid_tx[STA_TID_NUM];
+	u8 addba_req_num[STA_TID_NUM];
 	spinlock_t ampdu_tx;
 	u8 dialog_token_allocator;
 };
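
One practical consequence of the pointer-based layout, visible in the debugfs_sta.c and rx.c hunks above: callers must consult tid_state_rx[]/tid_state_tx[] (or otherwise know a session exists) before dereferencing tid_rx[]/tid_tx[]. A minimal illustrative helper, not part of the patch, that mirrors that check (real callers such as sta_info_destroy() also take the ampdu_rx/ampdu_tx spinlocks around the access):

    /* hypothetical helper - follows the state-then-dereference pattern used above */
    static u8 sta_rx_dialog_token(struct sta_info *sta, int tid)
    {
            if (!sta->ampdu_mlme.tid_state_rx[tid])    /* no Rx BA session on this TID */
                    return 0;
            return sta->ampdu_mlme.tid_rx[tid]->dialog_token;
    }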