author	Johannes Berg <johannes.berg@intel.com>	2010-06-10 04:21:39 -0400
committer	John W. Linville <linville@tuxdriver.com>	2010-06-14 15:39:27 -0400
commit	a622ab72b4dcfdf53e24b16e9530cb876979a00c (patch)
tree	170d2ccf3a594f3675b5fa58378319031054e806 /net/mac80211/tx.c
parent	a87f736d942c86255e3088c606f0e3eab6bbf784 (diff)
mac80211: use RCU for TX aggregation
Currently we allocate some memory for each TX aggregation session and
additionally keep a state bitmap indicating the state it is in. By using
RCU to protect the pointer, moving the state into the structure and some
locking trickery we can avoid locking when the TX agg session is fully
operational.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
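The message describes a standard RCU reader pattern: the per-TID session
struct is reached through an RCU-protected pointer and carries its own
atomic state bits, so a fully operational session is detected without
taking the station spinlock. A minimal sketch of that fast path, using
the names from the diff below and assuming the caller already holds
rcu_read_lock() (the framing shown here is illustrative, not part of
this patch):

	rcu_read_lock();
	tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
	if (tid_tx && test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state))
		/* session fully up: mark frame for A-MPDU, no lock taken */
		info->flags |= IEEE80211_TX_CTL_AMPDU;
	rcu_read_unlock();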
Diffstat (limited to 'net/mac80211/tx.c')
-rw-r--r--	net/mac80211/tx.c	88
1 file changed, 58 insertions(+), 30 deletions(-)
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 680bcb7093db..7bf1f9c9ea34 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1092,6 +1092,54 @@ static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
 	return true;
 }
 
+static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
+				  struct sk_buff *skb,
+				  struct ieee80211_tx_info *info,
+				  struct tid_ampdu_tx *tid_tx,
+				  int tid)
+{
+	bool queued = false;
+
+	if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
+		info->flags |= IEEE80211_TX_CTL_AMPDU;
+	} else {
+		spin_lock(&tx->sta->lock);
+		/*
+		 * Need to re-check now, because we may get here
+		 *
+		 *  1) in the window during which the setup is actually
+		 *     already done, but not marked yet because not all
+		 *     packets are spliced over to the driver pending
+		 *     queue yet -- if this happened we acquire the lock
+		 *     either before or after the splice happens, but
+		 *     need to recheck which of these cases happened.
+		 *
+		 *  2) during session teardown, if the OPERATIONAL bit
+		 *     was cleared due to the teardown but the pointer
+		 *     hasn't been assigned NULL yet (or we loaded it
+		 *     before it was assigned) -- in this case it may
+		 *     now be NULL which means we should just let the
+		 *     packet pass through because splicing the frames
+		 *     back is already done.
+		 */
+		tid_tx = tx->sta->ampdu_mlme.tid_tx[tid];
+
+		if (!tid_tx) {
+			/* do nothing, let packet pass through */
+		} else if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
+			info->flags |= IEEE80211_TX_CTL_AMPDU;
+		} else {
+			queued = true;
+			info->control.vif = &tx->sdata->vif;
+			info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+			__skb_queue_tail(&tid_tx->pending, skb);
+		}
+		spin_unlock(&tx->sta->lock);
+	}
+
+	return queued;
+}
+
 /*
  * initialises @tx
  */
@@ -1104,8 +1152,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
 	struct ieee80211_hdr *hdr;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	int hdrlen, tid;
-	u8 *qc, *state;
-	bool queued = false;
+	u8 *qc;
 
 	memset(tx, 0, sizeof(*tx));
 	tx->skb = skb;
@@ -1157,35 +1204,16 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
 		qc = ieee80211_get_qos_ctl(hdr);
 		tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
 
-		spin_lock(&tx->sta->lock);
-		/*
-		 * XXX: This spinlock could be fairly expensive, but see the
-		 *	comment in agg-tx.c:ieee80211_agg_tx_operational().
-		 *	One way to solve this would be to do something RCU-like
-		 *	for managing the tid_tx struct and using atomic bitops
-		 *	for the actual state -- by introducing an actual
-		 *	'operational' bit that would be possible. It would
-		 *	require changing ieee80211_agg_tx_operational() to
-		 *	set that bit, and changing the way tid_tx is managed
-		 *	everywhere, including races between that bit and
-		 *	tid_tx going away (tid_tx being added can be easily
-		 *	committed to memory before the 'operational' bit).
-		 */
-		tid_tx = tx->sta->ampdu_mlme.tid_tx[tid];
-		state = &tx->sta->ampdu_mlme.tid_state_tx[tid];
-		if (*state == HT_AGG_STATE_OPERATIONAL) {
-			info->flags |= IEEE80211_TX_CTL_AMPDU;
-		} else if (*state != HT_AGG_STATE_IDLE) {
-			/* in progress */
-			queued = true;
-			info->control.vif = &sdata->vif;
-			info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
-			__skb_queue_tail(&tid_tx->pending, skb);
-		}
-		spin_unlock(&tx->sta->lock);
+		tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]);
+		if (tid_tx) {
+			bool queued;
 
-		if (unlikely(queued))
-			return TX_QUEUED;
+			queued = ieee80211_tx_prep_agg(tx, skb, info,
+						       tid_tx, tid);
+
+			if (unlikely(queued))
+				return TX_QUEUED;
+		}
 	}
 
 	if (is_multicast_ether_addr(hdr->addr1)) {
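The re-check comment in ieee80211_tx_prep_agg() above only holds if the
writer retires the session in a matching order: clear the OPERATIONAL
bit and splice pending frames back under the station lock, publish NULL,
then free the struct after a grace period. That writer side lives in
net/mac80211/agg-tx.c and is not part of this file's diff; the sequence
below is an illustrative sketch of the pairing, not the actual teardown
code:

	spin_lock(&sta->lock);
	clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
	/* ... splice tid_tx->pending back to the regular TX path ... */
	rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL);
	spin_unlock(&sta->lock);

	synchronize_rcu();	/* wait for readers still holding tid_tx */
	kfree(tid_tx);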