about summary refs log tree commit diff stats
path: root/net/mac80211/mesh_pathtbl.c
diff options
context:
space:
mode:
authorJavier Cardona <javier@cozybit.com>2011-08-29 16:23:07 -0400
committerJohn W. Linville <linville@tuxdriver.com>2011-09-13 15:42:32 -0400
commit19c50b3dc530278a0d07dceebff1683f3bdc4a2b (patch)
treeb15c2df3fb0c5a36aed5814c61e363a378b47df3 /net/mac80211/mesh_pathtbl.c
parentad99d141144c4996c810fe75f04c387214ca360a (diff)
mac80211: Don't iterate twice over all mpaths when once in sufficient
Signed-off-by: Javier Cardona <javier@cozybit.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'net/mac80211/mesh_pathtbl.c')
-rw-r--r--  net/mac80211/mesh_pathtbl.c  64
1 files changed, 37 insertions, 27 deletions
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 717f38a7134c..4a3053b09e31 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -49,7 +49,9 @@ int mesh_paths_generation;
49 49
50/* This lock will have the grow table function as writer and add / delete nodes 50/* This lock will have the grow table function as writer and add / delete nodes
51 * as readers. When reading the table (i.e. doing lookups) we are well protected 51 * as readers. When reading the table (i.e. doing lookups) we are well protected
52 * by RCU 52 * by RCU. We need to take this lock when modying the number of buckets
53 * on one of the path tables but we don't need to if adding or removing mpaths
54 * from hash buckets.
53 */ 55 */
54static DEFINE_RWLOCK(pathtbl_resize_lock); 56static DEFINE_RWLOCK(pathtbl_resize_lock);
55 57
@@ -817,6 +819,32 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
817 rcu_read_unlock(); 819 rcu_read_unlock();
818} 820}
819 821
822static void mesh_path_node_reclaim(struct rcu_head *rp)
823{
824 struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
825 struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
826
827 del_timer_sync(&node->mpath->timer);
828 atomic_dec(&sdata->u.mesh.mpaths);
829 kfree(node->mpath);
830 kfree(node);
831}
832
833/* needs to be called with the corresponding hashwlock taken */
834static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
835{
836 struct mesh_path *mpath;
837 mpath = node->mpath;
838 spin_lock(&mpath->state_lock);
839 mpath->flags |= MESH_PATH_RESOLVING;
840 if (mpath->is_gate)
841 mesh_gate_del(tbl, mpath);
842 hlist_del_rcu(&node->list);
843 call_rcu(&node->rcu, mesh_path_node_reclaim);
844 spin_unlock(&mpath->state_lock);
845 atomic_dec(&tbl->entries);
846}
847
820static void mesh_path_flush(struct ieee80211_sub_if_data *sdata) 848static void mesh_path_flush(struct ieee80211_sub_if_data *sdata)
821{ 849{
822 struct mesh_table *tbl; 850 struct mesh_table *tbl;
@@ -829,23 +857,15 @@ static void mesh_path_flush(struct ieee80211_sub_if_data *sdata)
829 tbl = rcu_dereference(mesh_paths); 857 tbl = rcu_dereference(mesh_paths);
830 for_each_mesh_entry(tbl, p, node, i) { 858 for_each_mesh_entry(tbl, p, node, i) {
831 mpath = node->mpath; 859 mpath = node->mpath;
832 if (mpath->sdata == sdata) 860 if (mpath->sdata == sdata) {
833 mesh_path_del(mpath->dst, mpath->sdata); 861 spin_lock_bh(&tbl->hashwlock[i]);
862 __mesh_path_del(tbl, node);
863 spin_unlock_bh(&tbl->hashwlock[i]);
864 }
834 } 865 }
835 rcu_read_unlock(); 866 rcu_read_unlock();
836} 867}
837 868
838static void mesh_path_node_reclaim(struct rcu_head *rp)
839{
840 struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
841 struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
842
843 del_timer_sync(&node->mpath->timer);
844 atomic_dec(&sdata->u.mesh.mpaths);
845 kfree(node->mpath);
846 kfree(node);
847}
848
849static void mpp_path_flush(struct ieee80211_sub_if_data *sdata) 869static void mpp_path_flush(struct ieee80211_sub_if_data *sdata)
850{ 870{
851 struct mesh_table *tbl; 871 struct mesh_table *tbl;
@@ -859,12 +879,8 @@ static void mpp_path_flush(struct ieee80211_sub_if_data *sdata)
859 lockdep_is_held(pathtbl_resize_lock)); 879 lockdep_is_held(pathtbl_resize_lock));
860 for_each_mesh_entry(tbl, p, node, i) { 880 for_each_mesh_entry(tbl, p, node, i) {
861 mpath = node->mpath; 881 mpath = node->mpath;
862 if (mpath->sdata != sdata)
863 continue;
864 spin_lock_bh(&tbl->hashwlock[i]); 882 spin_lock_bh(&tbl->hashwlock[i]);
865 hlist_del_rcu(&node->list); 883 __mesh_path_del(tbl, node);
866 call_rcu(&node->rcu, mesh_path_node_reclaim);
867 atomic_dec(&tbl->entries);
868 spin_unlock_bh(&tbl->hashwlock[i]); 884 spin_unlock_bh(&tbl->hashwlock[i]);
869 } 885 }
870 read_unlock_bh(&pathtbl_resize_lock); 886 read_unlock_bh(&pathtbl_resize_lock);
@@ -912,14 +928,7 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
912 mpath = node->mpath; 928 mpath = node->mpath;
913 if (mpath->sdata == sdata && 929 if (mpath->sdata == sdata &&
914 memcmp(addr, mpath->dst, ETH_ALEN) == 0) { 930 memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
915 spin_lock_bh(&mpath->state_lock); 931 __mesh_path_del(tbl, node);
916 if (mpath->is_gate)
917 mesh_gate_del(tbl, mpath);
918 mpath->flags |= MESH_PATH_RESOLVING;
919 hlist_del_rcu(&node->list);
920 call_rcu(&node->rcu, mesh_path_node_reclaim);
921 atomic_dec(&tbl->entries);
922 spin_unlock_bh(&mpath->state_lock);
923 goto enddel; 932 goto enddel;
924 } 933 }
925 } 934 }
@@ -1160,6 +1169,7 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
1160 (!(mpath->flags & MESH_PATH_FIXED)) && 1169 (!(mpath->flags & MESH_PATH_FIXED)) &&
1161 time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) 1170 time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
1162 mesh_path_del(mpath->dst, mpath->sdata); 1171 mesh_path_del(mpath->dst, mpath->sdata);
1172 }
1163 rcu_read_unlock(); 1173 rcu_read_unlock();
1164} 1174}
1165 1175