Diffstat (limited to 'net/mac80211/tx.c')
 -rw-r--r--  net/mac80211/tx.c | 93
 1 file changed, 63 insertions(+), 30 deletions(-)
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 680bcb7093db..698d4718b1a4 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1092,6 +1092,59 @@ static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
 	return true;
 }
 
+static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
+				  struct sk_buff *skb,
+				  struct ieee80211_tx_info *info,
+				  struct tid_ampdu_tx *tid_tx,
+				  int tid)
+{
+	bool queued = false;
+
+	if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
+		info->flags |= IEEE80211_TX_CTL_AMPDU;
+	} else if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
+		/*
+		 * nothing -- this aggregation session is being started
+		 * but that might still fail with the driver
+		 */
+	} else {
+		spin_lock(&tx->sta->lock);
+		/*
+		 * Need to re-check now, because we may get here
+		 *
+		 *  1) in the window during which the setup is actually
+		 *     already done, but not marked yet because not all
+		 *     packets are spliced over to the driver pending
+		 *     queue yet -- if this happened we acquire the lock
+		 *     either before or after the splice happens, but
+		 *     need to recheck which of these cases happened.
+		 *
+		 *  2) during session teardown, if the OPERATIONAL bit
+		 *     was cleared due to the teardown but the pointer
+		 *     hasn't been assigned NULL yet (or we loaded it
+		 *     before it was assigned) -- in this case it may
+		 *     now be NULL which means we should just let the
+		 *     packet pass through because splicing the frames
+		 *     back is already done.
+		 */
+		tid_tx = tx->sta->ampdu_mlme.tid_tx[tid];
+
+		if (!tid_tx) {
+			/* do nothing, let packet pass through */
+		} else if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
+			info->flags |= IEEE80211_TX_CTL_AMPDU;
+		} else {
+			queued = true;
+			info->control.vif = &tx->sdata->vif;
+			info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+			__skb_queue_tail(&tid_tx->pending, skb);
+		}
+		spin_unlock(&tx->sta->lock);
+	}
+
+	return queued;
+}
+
 /*
  * initialises @tx
  */
@@ -1104,8 +1157,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
 	struct ieee80211_hdr *hdr;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	int hdrlen, tid;
-	u8 *qc, *state;
-	bool queued = false;
+	u8 *qc;
 
 	memset(tx, 0, sizeof(*tx));
 	tx->skb = skb;
@@ -1157,35 +1209,16 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
 		qc = ieee80211_get_qos_ctl(hdr);
 		tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
 
-		spin_lock(&tx->sta->lock);
-		/*
-		 * XXX: This spinlock could be fairly expensive, but see the
-		 *	comment in agg-tx.c:ieee80211_agg_tx_operational().
-		 *	One way to solve this would be to do something RCU-like
-		 *	for managing the tid_tx struct and using atomic bitops
-		 *	for the actual state -- by introducing an actual
-		 *	'operational' bit that would be possible. It would
-		 *	require changing ieee80211_agg_tx_operational() to
-		 *	set that bit, and changing the way tid_tx is managed
-		 *	everywhere, including races between that bit and
-		 *	tid_tx going away (tid_tx being added can be easily
-		 *	committed to memory before the 'operational' bit).
-		 */
-		tid_tx = tx->sta->ampdu_mlme.tid_tx[tid];
-		state = &tx->sta->ampdu_mlme.tid_state_tx[tid];
-		if (*state == HT_AGG_STATE_OPERATIONAL) {
-			info->flags |= IEEE80211_TX_CTL_AMPDU;
-		} else if (*state != HT_AGG_STATE_IDLE) {
-			/* in progress */
-			queued = true;
-			info->control.vif = &sdata->vif;
-			info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
-			__skb_queue_tail(&tid_tx->pending, skb);
-		}
-		spin_unlock(&tx->sta->lock);
-
-		if (unlikely(queued))
-			return TX_QUEUED;
+		tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]);
+		if (tid_tx) {
+			bool queued;
+
+			queued = ieee80211_tx_prep_agg(tx, skb, info,
+						       tid_tx, tid);
+
+			if (unlikely(queued))
+				return TX_QUEUED;
+		}
 	}
 
 	if (is_multicast_ether_addr(hdr->addr1)) {
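
The heart of this change is the split into a lock-free fast path and a locked slow path: ieee80211_tx_prepare() now looks tid_tx up with rcu_dereference(), and only ieee80211_tx_prep_agg() takes sta->lock, and only when the session is neither operational nor being started. Below is a minimal, stand-alone user-space sketch of that decision logic, not kernel code: fake_sta, fake_tid_tx, the pending counter and the pthread mutex are invented stand-ins for sta_info, tid_ampdu_tx, the pending skb queue and sta->lock.

/*
 * Stand-alone sketch (plain C, user space) of the fast-path/slow-path
 * split introduced by this patch.  All names here are illustrative
 * stand-ins, not the mac80211 structures.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define HT_AGG_STATE_OPERATIONAL	0
#define HT_AGG_STATE_WANT_START		1

struct fake_tid_tx {
	unsigned long state;		/* bit flags, like tid_ampdu_tx state */
	int pending;			/* stands in for the pending skb queue */
};

struct fake_sta {
	pthread_mutex_t lock;		/* stands in for sta->lock */
	struct fake_tid_tx *tid_tx;	/* stands in for ampdu_mlme.tid_tx[tid] */
};

static bool test_bit(int nr, const unsigned long *addr)
{
	return (*addr >> nr) & 1UL;
}

/* Mirrors the decision logic of the new ieee80211_tx_prep_agg(). */
static bool prep_agg(struct fake_sta *sta, struct fake_tid_tx *tid_tx,
		     bool *ampdu)
{
	bool queued = false;

	if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
		*ampdu = true;		/* session running: mark the frame */
	} else if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
		/* session being started; it may still fail, send normally */
	} else {
		pthread_mutex_lock(&sta->lock);
		/* re-check under the lock: setup or teardown may have raced */
		tid_tx = sta->tid_tx;
		if (!tid_tx) {
			/* torn down, frames already spliced back: pass through */
		} else if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
			*ampdu = true;
		} else {
			tid_tx->pending++;	/* queue until setup finishes */
			queued = true;
		}
		pthread_mutex_unlock(&sta->lock);
	}

	return queued;
}

int main(void)
{
	struct fake_tid_tx agg = { .state = 1UL << HT_AGG_STATE_OPERATIONAL };
	struct fake_sta sta = { .tid_tx = &agg };
	bool ampdu = false;
	bool queued;

	pthread_mutex_init(&sta.lock, NULL);
	queued = prep_agg(&sta, sta.tid_tx, &ampdu);
	printf("queued=%d ampdu=%d\n", queued, ampdu);
	return 0;
}

The design point is the one the removed XXX comment asked for: publish the session through a pointer that can be read without sta->lock and keep the state in atomic bit flags, so the per-frame hot path only falls back to the lock in the short setup and teardown windows.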