Diffstat (limited to 'net/mac80211/mesh_pathtbl.c')
-rw-r--r--  net/mac80211/mesh_pathtbl.c  127
1 file changed, 126 insertions(+), 1 deletion(-)
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index e4fa2905fadc..3c72557df45a 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -36,6 +36,7 @@ struct mpath_node {
 };
 
 static struct mesh_table *mesh_paths;
+static struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */
 
 /* This lock will have the grow table function as writer and add / delete nodes
  * as readers. When reading the table (i.e. doing lookups) we are well protected
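
For orientation (this note is editorial, not part of the patch): mpp_paths reuses struct mesh_path, but its entries are proxy mappings rather than mesh routes. A sketch of what each entry carries, based on how mpp_path_add() below fills the fields:

	/* Shape of an mpp_paths entry, as used by this patch:
	 *   mpath->dst - MAC address of a node outside the mesh
	 *   mpath->mpp - MAC address of the Mesh Point Portal / Mesh AP
	 *                that proxies for it
	 */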
@@ -94,6 +95,34 @@ struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
 	return NULL;
 }
 
+struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
+{
+	struct mesh_path *mpath;
+	struct hlist_node *n;
+	struct hlist_head *bucket;
+	struct mesh_table *tbl;
+	struct mpath_node *node;
+
+	tbl = rcu_dereference(mpp_paths);
+
+	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
+	hlist_for_each_entry_rcu(node, n, bucket, list) {
+		mpath = node->mpath;
+		if (mpath->sdata == sdata &&
+		    memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
+			if (MPATH_EXPIRED(mpath)) {
+				spin_lock_bh(&mpath->state_lock);
+				if (MPATH_EXPIRED(mpath))
+					mpath->flags &= ~MESH_PATH_ACTIVE;
+				spin_unlock_bh(&mpath->state_lock);
+			}
+			return mpath;
+		}
+	}
+	return NULL;
+}
+
+
 /**
  * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
  * @idx: index
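
A minimal sketch of how a transmit path might consume this lookup; the mesh_da buffer and the call site are illustrative assumptions, not taken from this patch. The RCU discipline, though, follows directly from the rcu_dereference() above and the synchronize_rcu() in the add path below:

	/* Hypothetical caller: find the MPP that proxies for a destination
	 * outside the mesh.  The returned entry is only valid inside the
	 * RCU read-side critical section, because a concurrent table grow
	 * frees the old table after synchronize_rcu(). */
	struct mesh_path *mppath;
	u8 mesh_da[ETH_ALEN];

	rcu_read_lock();
	mppath = mpp_path_lookup(skb->data, sdata);	/* skb->data holds the DA */
	if (mppath)
		memcpy(mesh_da, mppath->mpp, ETH_ALEN);	/* forward via the proxy */
	rcu_read_unlock();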
@@ -226,6 +255,91 @@ err_path_alloc:
 }
 
 
+int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
+{
+	struct mesh_path *mpath, *new_mpath;
+	struct mpath_node *node, *new_node;
+	struct hlist_head *bucket;
+	struct hlist_node *n;
+	int grow = 0;
+	int err = 0;
+	u32 hash_idx;
+
+
+	if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0)
+		/* never add ourselves as neighbours */
+		return -ENOTSUPP;
+
+	if (is_multicast_ether_addr(dst))
+		return -ENOTSUPP;
+
+	err = -ENOMEM;
+	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL);
+	if (!new_mpath)
+		goto err_path_alloc;
+
+	new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
+	if (!new_node)
+		goto err_node_alloc;
+
+	read_lock(&pathtbl_resize_lock);
+	memcpy(new_mpath->dst, dst, ETH_ALEN);
+	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
+	new_mpath->sdata = sdata;
+	new_mpath->flags = 0;
+	skb_queue_head_init(&new_mpath->frame_queue);
+	new_node->mpath = new_mpath;
+	new_mpath->exp_time = jiffies;
+	spin_lock_init(&new_mpath->state_lock);
+
+	hash_idx = mesh_table_hash(dst, sdata, mpp_paths);
+	bucket = &mpp_paths->hash_buckets[hash_idx];
+
+	spin_lock(&mpp_paths->hashwlock[hash_idx]);
+
+	err = -EEXIST;
+	hlist_for_each_entry(node, n, bucket, list) {
+		mpath = node->mpath;
+		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
+			goto err_exists;
+	}
+
+	hlist_add_head_rcu(&new_node->list, bucket);
+	if (atomic_inc_return(&mpp_paths->entries) >=
+		mpp_paths->mean_chain_len * (mpp_paths->hash_mask + 1))
+		grow = 1;
+
+	spin_unlock(&mpp_paths->hashwlock[hash_idx]);
+	read_unlock(&pathtbl_resize_lock);
+	if (grow) {
+		struct mesh_table *oldtbl, *newtbl;
+
+		write_lock(&pathtbl_resize_lock);
+		oldtbl = mpp_paths;
+		newtbl = mesh_table_grow(mpp_paths);
+		if (!newtbl) {
+			write_unlock(&pathtbl_resize_lock);
+			return 0;
+		}
+		rcu_assign_pointer(mpp_paths, newtbl);
+		write_unlock(&pathtbl_resize_lock);
+
+		synchronize_rcu();
+		mesh_table_free(oldtbl, false);
+	}
+	return 0;
+
+err_exists:
+	spin_unlock(&mpp_paths->hashwlock[hash_idx]);
+	read_unlock(&pathtbl_resize_lock);
+	kfree(new_node);
+err_node_alloc:
+	kfree(new_mpath);
+err_path_alloc:
+	return err;
+}
+
+
 /**
  * mesh_plink_broken - deactivates paths and sends perr when a link breaks
  *
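
And a hedged sketch of where mpp_path_add() could be called from; proxied_addr and mpp_addr are illustrative names, not identifiers from this patch. Note the GFP_KERNEL allocations above: the caller must be able to sleep.

	/* Hypothetical learning site: a received 6-address mesh frame
	 * shows that proxied_addr is reachable through the portal at
	 * mpp_addr, so record the mapping. */
	int err;

	err = mpp_path_add(proxied_addr, mpp_addr, sdata);
	if (err == -ENOTSUPP || err == -EEXIST)
		err = 0;	/* self/multicast, or mapping already known */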
@@ -475,11 +589,21 @@ static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
 int mesh_pathtbl_init(void)
 {
 	mesh_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
+	if (!mesh_paths)
+		return -ENOMEM;
 	mesh_paths->free_node = &mesh_path_node_free;
 	mesh_paths->copy_node = &mesh_path_node_copy;
 	mesh_paths->mean_chain_len = MEAN_CHAIN_LEN;
-	if (!mesh_paths)
+
+	mpp_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
+	if (!mpp_paths) {
+		mesh_table_free(mesh_paths, true);
 		return -ENOMEM;
+	}
+	mpp_paths->free_node = &mesh_path_node_free;
+	mpp_paths->copy_node = &mesh_path_node_copy;
+	mpp_paths->mean_chain_len = MEAN_CHAIN_LEN;
+
 	return 0;
 }
 
@@ -511,4 +635,5 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
 void mesh_pathtbl_unregister(void)
 {
 	mesh_table_free(mesh_paths, true);
+	mesh_table_free(mpp_paths, true);
 }