author		Johannes Berg <johannes.berg@intel.com>		2010-06-10 04:21:38 -0400
committer	John W. Linville <linville@tuxdriver.com>	2010-06-14 15:39:27 -0400
commit		a87f736d942c86255e3088c606f0e3eab6bbf784
tree		74d3d63dca9fc982fcb7c165247d6924c021b4ef	/net/mac80211/rx.c
parent		c1475ca99edcc7216ddc45838ab2c3281c14ba22
mac80211: use RCU for RX aggregation
Currently we allocate some memory for each RX aggregation session and
additionally keep a flag indicating whether or not it is valid. By
using RCU to protect the pointer and making sure that the memory is
fully set up before it becomes visible to the RX path, we can remove
the need for the bool that indicates validity, as well as for locking
on the RX path since it is always synchronised against itself, and we
can guarantee that all other modifications are done when the structure
is not visible to the RX path.

The net result is that since we remove locking requirements from the
RX path, we can in the future use any kind of lock for the setup and
teardown code paths.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
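The diff below applies the classic RCU publish/lookup pattern: the setup path
initializes the per-TID session completely and only then publishes the pointer
with rcu_assign_pointer(), so an RX-path reader that picks it up via
rcu_dereference() can never observe a half-constructed session. A minimal
sketch of that pattern follows; the struct fields and function names are
illustrative stand-ins, not the actual mac80211 definitions:

	#include <linux/rcupdate.h>
	#include <linux/slab.h>
	#include <linux/types.h>

	#define NUM_TIDS 16

	/* Illustrative stand-in for the per-session structure. */
	struct tid_rx_session {
		u16 buf_size;
		u16 head_seq_num;
		/* ... reorder buffer, timer, etc. ... */
	};

	/* One RCU-protected pointer per TID, as in sta->ampdu_mlme.tid_rx[]. */
	static struct tid_rx_session __rcu *tid_rx[NUM_TIDS];

	/* Setup path: free to use any lock, since readers never contend on it. */
	static int rx_agg_start(int tid, u16 buf_size)
	{
		struct tid_rx_session *s = kzalloc(sizeof(*s), GFP_KERNEL);

		if (!s)
			return -ENOMEM;
		s->buf_size = buf_size;
		/* ... complete ALL initialization before the pointer is visible ... */

		rcu_assign_pointer(tid_rx[tid], s);	/* publish */
		return 0;
	}

	/* RX path: called under rcu_read_lock(); a non-NULL result is fully set up. */
	static struct tid_rx_session *rx_agg_lookup(int tid)
	{
		return rcu_dereference(tid_rx[tid]);
	}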
Diffstat (limited to 'net/mac80211/rx.c')
-rw-r--r--	net/mac80211/rx.c	34
1 file changed, 15 insertions(+), 19 deletions(-)
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 84f11733b9fe..ee01daccacbb 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -719,16 +719,13 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
 
 	tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
 
-	spin_lock(&sta->lock);
-
-	if (!sta->ampdu_mlme.tid_active_rx[tid])
-		goto dont_reorder_unlock;
-
-	tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
+	tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
+	if (!tid_agg_rx)
+		goto dont_reorder;
 
 	/* qos null data frames are excluded */
 	if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
-		goto dont_reorder_unlock;
+		goto dont_reorder;
 
 	/* new, potentially un-ordered, ampdu frame - process it */
 
@@ -740,20 +737,22 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
 	/* if this mpdu is fragmented - terminate rx aggregation session */
 	sc = le16_to_cpu(hdr->seq_ctrl);
 	if (sc & IEEE80211_SCTL_FRAG) {
-		spin_unlock(&sta->lock);
 		skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
 		skb_queue_tail(&rx->sdata->skb_queue, skb);
 		ieee80211_queue_work(&local->hw, &rx->sdata->work);
 		return;
 	}
 
-	if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames)) {
-		spin_unlock(&sta->lock);
+	/*
+	 * No locking needed -- we will only ever process one
+	 * RX packet at a time, and thus own tid_agg_rx. All
+	 * other code manipulating it needs to (and does) make
+	 * sure that we cannot get to it any more before doing
+	 * anything with it.
+	 */
+	if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames))
 		return;
-	}
 
- dont_reorder_unlock:
-	spin_unlock(&sta->lock);
  dont_reorder:
 	__skb_queue_tail(frames, skb);
 }
@@ -1830,13 +1829,11 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
 				  &bar_data, sizeof(bar_data)))
 			return RX_DROP_MONITOR;
 
-		spin_lock(&rx->sta->lock);
 		tid = le16_to_cpu(bar_data.control) >> 12;
-		if (!rx->sta->ampdu_mlme.tid_active_rx[tid]) {
-			spin_unlock(&rx->sta->lock);
+
+		tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]);
+		if (!tid_agg_rx)
 			return RX_DROP_MONITOR;
-		}
-		tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid];
 
 		start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
 
@@ -1849,7 +1846,6 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
 		ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num,
 						 frames);
 		kfree_skb(skb);
-		spin_unlock(&rx->sta->lock);
 		return RX_QUEUED;
 	}
 
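The teardown path, which this patch frees to use whatever locking scheme it
likes, still has to observe the usual RCU ordering: unpublish the pointer
first, then wait for a grace period before freeing, so that no RX-path reader
can still hold a reference to the session. A hedged sketch of that sequence,
reusing the illustrative names and definitions from the earlier example:

	/* Teardown path: unpublish, wait out readers, then free. */
	static void rx_agg_stop(int tid)
	{
		struct tid_rx_session *s;

		/* Illustrative only: assumes the caller serializes setup/teardown. */
		s = rcu_dereference_protected(tid_rx[tid], 1);
		if (!s)
			return;

		rcu_assign_pointer(tid_rx[tid], NULL);	/* hide from the RX path */
		synchronize_rcu();			/* wait for in-flight readers */
		kfree(s);
	}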