summaryrefslogtreecommitdiffstats
path: root/net/mac80211/util.c
diff options
context:
space:
mode:
author: Toke Høiland-Jørgensen <toke@toke.dk> 2016-09-22 13:04:20 -0400
committer: Johannes Berg <johannes.berg@intel.com> 2016-09-30 08:46:57 -0400
commitbb42f2d13ffcd0baed7547b37d05add51fcd50e1 (patch)
treee2472cae73b2ad804612bd7da861bc55f043ac0f /net/mac80211/util.c
parent3a53731df7e2a6e238274d13aa8d4826f78030f9 (diff)
mac80211: Move reorder-sensitive TX handlers to after TXQ dequeue
The TXQ intermediate queues can cause packet reordering when more than one flow is active to a single station. Since some of the wifi-specific packet handling (notably sequence number and encryption handling) is sensitive to re-ordering, things break if they are applied before the TXQ. This splits up the TX handlers and fast_xmit logic into two parts: An early part and a late part. The former is applied before TXQ enqueue, and the latter after dequeue. The non-TXQ path just applies both parts at once. Because fragments shouldn't be split up or reordered, the fragmentation handler is run after dequeue. Any fragments are then kept in the TXQ and on subsequent dequeues they take precedence over dequeueing from the FQ structure. This approach avoids having to scatter special cases all over the place for when TXQ is enabled, at the cost of making the fast_xmit and TX handler code slightly more complex. Signed-off-by: Toke Høiland-Jørgensen <toke@toke.dk> [fix a few code-style nits, make ieee80211_xmit_fast_finish void, remove a useless txq->sta check] Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Diffstat (limited to 'net/mac80211/util.c')
-rw-r--r--net/mac80211/util.c11
1 files changed, 9 insertions, 2 deletions
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 91754c8dafb2..545c79a42a77 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -3441,11 +3441,18 @@ void ieee80211_txq_get_depth(struct ieee80211_txq *txq,
 			     unsigned long *byte_cnt)
 {
 	struct txq_info *txqi = to_txq_info(txq);
+	u32 frag_cnt = 0, frag_bytes = 0;
+	struct sk_buff *skb;
+
+	skb_queue_walk(&txqi->frags, skb) {
+		frag_cnt++;
+		frag_bytes += skb->len;
+	}
 
 	if (frame_cnt)
-		*frame_cnt = txqi->tin.backlog_packets;
+		*frame_cnt = txqi->tin.backlog_packets + frag_cnt;
 
 	if (byte_cnt)
-		*byte_cnt = txqi->tin.backlog_bytes;
+		*byte_cnt = txqi->tin.backlog_bytes + frag_bytes;
 }
 EXPORT_SYMBOL(ieee80211_txq_get_depth);