author	Javier Cardona <javier@cozybit.com>	2011-11-04 00:11:10 -0400
committer	John W. Linville <linville@tuxdriver.com>	2011-11-09 16:01:02 -0500
commit	f3011cf9deb689bd68279c728c501a4166983c19 (patch)
tree	7d9945a2934c6de78063c358a6d350af8b6a19a9 /net/mac80211/mesh_hwmp.c
parent	7e1e386421e2ec7804b77f2c1c8e2517e82ecb7e (diff)
mac80211: Avoid filling up mesh preq queue with redundant requests
Don't accept redundant PREQs for a given destination. This fixes a problem under high load:

kernel: [20386.250913] mesh_queue_preq: 235 callbacks suppressed
kernel: [20386.253335] Mesh HWMP (mesh0): PREQ node queue full
kernel: [20386.253352] Mesh HWMP (mesh0): PREQ node queue full
(...)

The 802.11s protocol has a provision to limit the rate at which path requests (PREQs) are transmitted (dot11MeshHWMPpreqMinInterval), but there was no limit on the rate at which PREQs were being queued up. There is a valid reason for queueing PREQs: this way we can even out PREQ bursts. But queueing multiple PREQs for the same destination is useless.

Reported-by: Pedro Larbig <pedro.larbig@carhs.de>
Signed-off-by: Javier Cardona <javier@cozybit.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
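In short, the fix piggybacks a "PREQ already queued" bit on the per-path flags: it is checked and set under mpath->state_lock on the enqueue side, and cleared once the queued request is consumed. As a rough illustration of that pattern outside the kernel, here is a minimal userspace sketch. It uses pthread mutexes in place of spin_lock_bh(), and the names queue_request(), service_one() and struct path are invented for this example; they are not part of mac80211.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define QUEUE_MAX 8

struct path {
	char dst[18];           /* destination id (string form here) */
	bool req_queued;        /* plays the role of MESH_PATH_REQ_QUEUED */
	pthread_mutex_t lock;   /* plays the role of mpath->state_lock */
};

static char queue[QUEUE_MAX][18];
static int queue_len;
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns true if a request was queued, false if it was dropped as
 * redundant or because the queue is full. Lock order mirrors the
 * patch: the queue lock is taken first, then the per-path lock. */
static bool queue_request(struct path *p)
{
	bool queued = false;

	pthread_mutex_lock(&queue_lock);
	if (queue_len >= QUEUE_MAX)
		goto out;

	pthread_mutex_lock(&p->lock);
	if (p->req_queued) {    /* redundant: one is already pending */
		pthread_mutex_unlock(&p->lock);
		goto out;
	}
	p->req_queued = true;
	pthread_mutex_unlock(&p->lock);

	strcpy(queue[queue_len++], p->dst);
	queued = true;
out:
	pthread_mutex_unlock(&queue_lock);
	return queued;
}

/* The consumer clears the flag when it services a request,
 * re-enabling future enqueues for this destination. */
static void service_one(struct path *p)
{
	pthread_mutex_lock(&queue_lock);
	if (queue_len > 0)
		queue_len--;    /* pop (ordering ignored in this toy) */
	pthread_mutex_unlock(&queue_lock);

	pthread_mutex_lock(&p->lock);
	p->req_queued = false;
	pthread_mutex_unlock(&p->lock);
}

int main(void)
{
	struct path p = { .dst = "02:00:00:00:00:01",
			  .lock = PTHREAD_MUTEX_INITIALIZER };

	printf("first enqueue:  %d\n", queue_request(&p)); /* 1 */
	printf("second enqueue: %d\n", queue_request(&p)); /* 0: redundant */
	service_one(&p);
	printf("after service:  %d\n", queue_request(&p)); /* 1 again */
	return 0;
}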
Diffstat (limited to 'net/mac80211/mesh_hwmp.c')
-rw-r--r--	net/mac80211/mesh_hwmp.c | 15 +++++++++++++--
1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 9a1f8bbc49b8..b22b223ccde1 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -867,9 +867,19 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
 		return;
 	}
 
+	spin_lock_bh(&mpath->state_lock);
+	if (mpath->flags & MESH_PATH_REQ_QUEUED) {
+		spin_unlock_bh(&mpath->state_lock);
+		spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
+		return;
+	}
+
 	memcpy(preq_node->dst, mpath->dst, ETH_ALEN);
 	preq_node->flags = flags;
 
+	mpath->flags |= MESH_PATH_REQ_QUEUED;
+	spin_unlock_bh(&mpath->state_lock);
+
 	list_add_tail(&preq_node->list, &ifmsh->preq_queue.list);
 	++ifmsh->preq_queue_len;
 	spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
@@ -921,6 +931,7 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
 		goto enddiscovery;
 
 	spin_lock_bh(&mpath->state_lock);
+	mpath->flags &= ~MESH_PATH_REQ_QUEUED;
 	if (preq_node->flags & PREQ_Q_F_START) {
 		if (mpath->flags & MESH_PATH_RESOLVING) {
 			spin_unlock_bh(&mpath->state_lock);
@@ -1028,8 +1039,7 @@ int mesh_nexthop_lookup(struct sk_buff *skb,
 		mesh_queue_preq(mpath, PREQ_Q_F_START);
 	}
 
-	if (skb_queue_len(&mpath->frame_queue) >=
-			MESH_FRAME_QUEUE_LEN)
+	if (skb_queue_len(&mpath->frame_queue) >= MESH_FRAME_QUEUE_LEN)
 		skb_to_free = skb_dequeue(&mpath->frame_queue);
 
 	info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
@@ -1061,6 +1071,7 @@ void mesh_path_timer(unsigned long data)
 	} else if (mpath->discovery_retries < max_preq_retries(sdata)) {
 		++mpath->discovery_retries;
 		mpath->discovery_timeout *= 2;
+		mpath->flags &= ~MESH_PATH_REQ_QUEUED;
 		spin_unlock_bh(&mpath->state_lock);
 		mesh_queue_preq(mpath, 0);
 	} else {
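Note that the new flag has to be cleared at every point where a queued PREQ stops being pending: mesh_path_start_discovery() clears it under state_lock as soon as it dequeues the request, and the retry path in mesh_path_timer() clears it before calling mesh_queue_preq() again, since the retried request would otherwise be rejected as redundant by the new check in mesh_queue_preq().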