path: root/net/mac80211/tx.c
Diffstat (limited to 'net/mac80211/tx.c')
-rw-r--r--  net/mac80211/tx.c | 112
1 file changed, 70 insertions(+), 42 deletions(-)
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 680bcb7093db..c54db966926b 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -576,17 +576,6 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
 }
 
 static ieee80211_tx_result debug_noinline
-ieee80211_tx_h_sta(struct ieee80211_tx_data *tx)
-{
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
-
-	if (tx->sta && tx->sta->uploaded)
-		info->control.sta = &tx->sta->sta;
-
-	return TX_CONTINUE;
-}
-
-static ieee80211_tx_result debug_noinline
 ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
 {
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
@@ -1092,6 +1081,59 @@ static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
 	return true;
 }
 
+static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
+				  struct sk_buff *skb,
+				  struct ieee80211_tx_info *info,
+				  struct tid_ampdu_tx *tid_tx,
+				  int tid)
+{
+	bool queued = false;
+
+	if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
+		info->flags |= IEEE80211_TX_CTL_AMPDU;
+	} else if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
+		/*
+		 * nothing -- this aggregation session is being started
+		 * but that might still fail with the driver
+		 */
+	} else {
+		spin_lock(&tx->sta->lock);
+		/*
+		 * Need to re-check now, because we may get here
+		 *
+		 *  1) in the window during which the setup is actually
+		 *     already done, but not marked yet because not all
+		 *     packets are spliced over to the driver pending
+		 *     queue yet -- if this happened we acquire the lock
+		 *     either before or after the splice happens, but
+		 *     need to recheck which of these cases happened.
+		 *
+		 *  2) during session teardown, if the OPERATIONAL bit
+		 *     was cleared due to the teardown but the pointer
+		 *     hasn't been assigned NULL yet (or we loaded it
+		 *     before it was assigned) -- in this case it may
+		 *     now be NULL which means we should just let the
+		 *     packet pass through because splicing the frames
+		 *     back is already done.
+		 */
+		tid_tx = tx->sta->ampdu_mlme.tid_tx[tid];
+
+		if (!tid_tx) {
+			/* do nothing, let packet pass through */
+		} else if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
+			info->flags |= IEEE80211_TX_CTL_AMPDU;
+		} else {
+			queued = true;
+			info->control.vif = &tx->sdata->vif;
+			info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+			__skb_queue_tail(&tid_tx->pending, skb);
+		}
+		spin_unlock(&tx->sta->lock);
+	}
+
+	return queued;
+}
+
 /*
  * initialises @tx
  */
@@ -1104,8 +1146,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
 	struct ieee80211_hdr *hdr;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	int hdrlen, tid;
-	u8 *qc, *state;
-	bool queued = false;
+	u8 *qc;
 
 	memset(tx, 0, sizeof(*tx));
 	tx->skb = skb;
@@ -1157,35 +1198,16 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
 		qc = ieee80211_get_qos_ctl(hdr);
 		tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
 
-		spin_lock(&tx->sta->lock);
-		/*
-		 * XXX: This spinlock could be fairly expensive, but see the
-		 *	comment in agg-tx.c:ieee80211_agg_tx_operational().
-		 *	One way to solve this would be to do something RCU-like
-		 *	for managing the tid_tx struct and using atomic bitops
-		 *	for the actual state -- by introducing an actual
-		 *	'operational' bit that would be possible. It would
-		 *	require changing ieee80211_agg_tx_operational() to
-		 *	set that bit, and changing the way tid_tx is managed
-		 *	everywhere, including races between that bit and
-		 *	tid_tx going away (tid_tx being added can be easily
-		 *	committed to memory before the 'operational' bit).
-		 */
-		tid_tx = tx->sta->ampdu_mlme.tid_tx[tid];
-		state = &tx->sta->ampdu_mlme.tid_state_tx[tid];
-		if (*state == HT_AGG_STATE_OPERATIONAL) {
-			info->flags |= IEEE80211_TX_CTL_AMPDU;
-		} else if (*state != HT_AGG_STATE_IDLE) {
-			/* in progress */
-			queued = true;
-			info->control.vif = &sdata->vif;
-			info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
-			__skb_queue_tail(&tid_tx->pending, skb);
-		}
-		spin_unlock(&tx->sta->lock);
+		tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]);
+		if (tid_tx) {
+			bool queued;
 
-		if (unlikely(queued))
-			return TX_QUEUED;
+			queued = ieee80211_tx_prep_agg(tx, skb, info,
+						       tid_tx, tid);
+
+			if (unlikely(queued))
+				return TX_QUEUED;
+		}
 	}
 
 	if (is_multicast_ether_addr(hdr->addr1)) {
@@ -1274,6 +1296,11 @@ static int __ieee80211_tx(struct ieee80211_local *local,
 			break;
 	}
 
+	if (sta && sta->uploaded)
+		info->control.sta = &sta->sta;
+	else
+		info->control.sta = NULL;
+
 	ret = drv_tx(local, skb);
 	if (WARN_ON(ret != NETDEV_TX_OK && skb->len != len)) {
 		dev_kfree_skb(skb);
@@ -1313,7 +1340,6 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
 	CALL_TXH(ieee80211_tx_h_check_assoc);
 	CALL_TXH(ieee80211_tx_h_ps_buf);
 	CALL_TXH(ieee80211_tx_h_select_key);
-	CALL_TXH(ieee80211_tx_h_sta);
 	if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL))
 		CALL_TXH(ieee80211_tx_h_rate_ctrl);
 
@@ -1909,11 +1935,13 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
 		h_pos += encaps_len;
 	}
 
+#ifdef CONFIG_MAC80211_MESH
 	if (meshhdrlen > 0) {
 		memcpy(skb_push(skb, meshhdrlen), &mesh_hdr, meshhdrlen);
 		nh_pos += meshhdrlen;
 		h_pos += meshhdrlen;
 	}
+#endif
 
 	if (ieee80211_is_data_qos(fc)) {
 		__le16 *qos_control;
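
The XXX comment removed from ieee80211_tx_prepare() above describes the scheme the new code adopts: manage tid_tx RCU-style and track the session state with atomic bitops, so the common case no longer takes the per-station spinlock for every QoS data frame. Below is a minimal, self-contained userspace sketch of that check-then-recheck pattern, not mac80211 code: C11 atomics and a pthread mutex stand in for rcu_dereference(), test_bit() and spin_lock(), the HT_AGG_STATE_WANT_START special case is omitted for brevity, and all names (agg_session, publish_session, frame_needs_queueing) are invented for illustration.

/*
 * Illustrative only -- plain C11, not kernel code.  The writer
 * publishes the session pointer first and only then sets the
 * "operational" bit; readers take the lockless fast path on that
 * bit and fall back to the lock only while the session is being
 * set up or torn down, mirroring ieee80211_tx_prep_agg() above.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define AGG_OPERATIONAL	(1u << 0)

struct agg_session {
	atomic_uint state;			/* bit 0: operational */
};

static _Atomic(struct agg_session *) tid_session;	/* stands in for tid_tx */
static pthread_mutex_t sta_lock = PTHREAD_MUTEX_INITIALIZER;

/* Writer side: allocate and publish the pointer before flipping the bit. */
static struct agg_session *publish_session(void)
{
	struct agg_session *s = calloc(1, sizeof(*s));

	if (!s)
		abort();
	atomic_store_explicit(&tid_session, s, memory_order_release);
	return s;
}

/*
 * Reader side: the shape of the new TX path.  Returns true when the
 * frame would have to be parked on a pending queue.
 */
static bool frame_needs_queueing(void)
{
	struct agg_session *s =
		atomic_load_explicit(&tid_session, memory_order_acquire);
	bool queued = false;

	if (!s)
		return false;		/* no session, frame passes through */

	if (atomic_load(&s->state) & AGG_OPERATIONAL)
		return false;		/* fast path, no lock taken */

	/* Slow path: re-check under the lock; the session may have become
	 * operational, or may have gone away, since the load above. */
	pthread_mutex_lock(&sta_lock);
	s = atomic_load_explicit(&tid_session, memory_order_acquire);
	if (s && !(atomic_load(&s->state) & AGG_OPERATIONAL))
		queued = true;
	pthread_mutex_unlock(&sta_lock);

	return queued;
}

int main(void)
{
	struct agg_session *s;

	printf("no session:   queued = %d\n", frame_needs_queueing());
	s = publish_session();
	printf("being set up: queued = %d\n", frame_needs_queueing());
	atomic_fetch_or(&s->state, AGG_OPERATIONAL);
	printf("operational:  queued = %d\n", frame_needs_queueing());
	return 0;
}

Built with something like "cc -std=c11 -pthread sketch.c", this prints queued = 0, 1, 0: once the operational bit is visible, every frame takes the lock-free path, which is the point of replacing the old tid_state_tx[] array guarded by sta->lock.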