author     Javier Cardona <javier@cozybit.com>        2009-08-10 15:15:52 -0400
committer  John W. Linville <linville@tuxdriver.com>  2009-08-14 09:14:01 -0400
commit     18889231e4527dfe23145efe318e74744794a95d (patch)
tree       bcb509dab37d80f6dbbfb6671a530aa882c9975e /net/mac80211/mesh_pathtbl.c
parent     5b365834255d7c90fc724b032c814dfa297aacf9 (diff)
mac80211: Move mpath and mpp growth to mesh workqueue.
This prevents calling rcu_synchronize from within the tx path by moving
the table growth code to the mesh workqueue.

Move mesh_table_free and mesh_table_grow from mesh.c to mesh_pathtbl.c
and declare them static. Also, re-enable mesh in Kconfig and update the
configuration description.

Signed-off-by: Javier Cardona <javier@cozybit.com>
Tested-by: Andrey Yurovsky <andrey@cozybit.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'net/mac80211/mesh_pathtbl.c')
-rw-r--r--  net/mac80211/mesh_pathtbl.c  146
1 file changed, 110 insertions(+), 36 deletions(-)
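
The motivation is an execution-context rule: mesh_path_add() and
mpp_path_add() can run from the tx path, i.e. atomic context, where
sleeping is forbidden, while growing the table ends in
synchronize_rcu(), which sleeps. The patch therefore splits the
operation in two: the atomic caller only sets a flag bit and queues the
interface's work item, and the growth itself runs later from the mesh
workqueue in process context. A minimal standalone sketch of that
pattern follows; all names in it are hypothetical, it is not code from
this patch:

#include <linux/bitops.h>
#include <linux/workqueue.h>

static unsigned long pending_flags;
#define WORK_GROW_TABLE 0               /* hypothetical flag bit */

static void grow_table(void);           /* stands in for mesh_mpath_table_grow() */

/* Atomic context (e.g. the tx path): may not sleep, so only flag and queue. */
static void request_grow(struct work_struct *wk)
{
        set_bit(WORK_GROW_TABLE, &pending_flags);
        schedule_work(wk);              /* safe to call from atomic context */
}

/* Workqueue handler: runs in process context, where sleeping is allowed. */
static void table_work(struct work_struct *wk)
{
        if (test_and_clear_bit(WORK_GROW_TABLE, &pending_flags))
                grow_table();           /* free to call synchronize_rcu() */
}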
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 431865a58622..751c4d0e2b36 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -39,6 +39,69 @@ static struct mesh_table *mesh_paths;
 static struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */
 
 int mesh_paths_generation;
+static void __mesh_table_free(struct mesh_table *tbl)
+{
+        kfree(tbl->hash_buckets);
+        kfree(tbl->hashwlock);
+        kfree(tbl);
+}
+
+void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
+{
+        struct hlist_head *mesh_hash;
+        struct hlist_node *p, *q;
+        int i;
+
+        mesh_hash = tbl->hash_buckets;
+        for (i = 0; i <= tbl->hash_mask; i++) {
+                spin_lock(&tbl->hashwlock[i]);
+                hlist_for_each_safe(p, q, &mesh_hash[i]) {
+                        tbl->free_node(p, free_leafs);
+                        atomic_dec(&tbl->entries);
+                }
+                spin_unlock(&tbl->hashwlock[i]);
+        }
+        __mesh_table_free(tbl);
+}
+
+static struct mesh_table *mesh_table_grow(struct mesh_table *tbl)
+{
+        struct mesh_table *newtbl;
+        struct hlist_head *oldhash;
+        struct hlist_node *p, *q;
+        int i;
+
+        if (atomic_read(&tbl->entries)
+                        < tbl->mean_chain_len * (tbl->hash_mask + 1))
+                goto endgrow;
+
+        newtbl = mesh_table_alloc(tbl->size_order + 1);
+        if (!newtbl)
+                goto endgrow;
+
+        newtbl->free_node = tbl->free_node;
+        newtbl->mean_chain_len = tbl->mean_chain_len;
+        newtbl->copy_node = tbl->copy_node;
+        atomic_set(&newtbl->entries, atomic_read(&tbl->entries));
+
+        oldhash = tbl->hash_buckets;
+        for (i = 0; i <= tbl->hash_mask; i++)
+                hlist_for_each(p, &oldhash[i])
+                        if (tbl->copy_node(p, newtbl) < 0)
+                                goto errcopy;
+
+        return newtbl;
+
+errcopy:
+        for (i = 0; i <= newtbl->hash_mask; i++) {
+                hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
+                        tbl->free_node(p, 0);
+        }
+        __mesh_table_free(newtbl);
+endgrow:
+        return NULL;
+}
+
 
 /* This lock will have the grow table function as writer and add / delete nodes
  * as readers. When reading the table (i.e. doing lookups) we are well protected
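
The grow condition above means: only resize once the table holds more
than mean_chain_len entries per bucket on average; until then
mesh_table_grow() bails out through endgrow and returns NULL. A worked
example with hypothetical numbers (the real constants live elsewhere in
mac80211):

/* Hypothetical numbers, for illustration only:
 *   size_order = 8       -> (hash_mask + 1) = 2^8 = 256 buckets
 *   mean_chain_len = 2   -> grow threshold = 2 * 256 = 512 entries
 *
 * With fewer than 512 entries the function returns NULL (no resize).
 * At 512 entries it allocates a table of size_order + 1, i.e. 512
 * buckets, restoring an average chain length of one.
 */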
@@ -187,6 +250,8 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data
  */
 int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
 {
+        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+        struct ieee80211_local *local = sdata->local;
         struct mesh_path *mpath, *new_mpath;
         struct mpath_node *node, *new_node;
         struct hlist_head *bucket;
@@ -195,8 +260,6 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
         int err = 0;
         u32 hash_idx;
 
-        might_sleep();
-
         if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0)
                 /* never add ourselves as neighbours */
                 return -ENOTSUPP;
@@ -208,11 +271,11 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
                 return -ENOSPC;
 
         err = -ENOMEM;
-        new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL);
+        new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
         if (!new_mpath)
                 goto err_path_alloc;
 
-        new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
+        new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
         if (!new_node)
                 goto err_node_alloc;
 
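
The allocation flags change for the same reason the might_sleep() call
disappears above: once the function may run in atomic context,
GFP_KERNEL (which can sleep while reclaiming memory) is no longer
legal, and GFP_ATOMIC (which never sleeps but fails more readily under
memory pressure) must be used instead. A sketch of the rule, not code
from this patch:

/* Atomic context: the allocation must not sleep, so handling failure
 * is mandatory; mesh_path_add() does this via its err_path_alloc and
 * err_node_alloc labels, returning -ENOMEM to the caller.
 */
struct mesh_path *new_mpath = kzalloc(sizeof(*new_mpath), GFP_ATOMIC);
if (!new_mpath)
        return -ENOMEM;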
@@ -250,20 +313,8 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
         spin_unlock(&mesh_paths->hashwlock[hash_idx]);
         read_unlock(&pathtbl_resize_lock);
         if (grow) {
-                struct mesh_table *oldtbl, *newtbl;
-
-                write_lock(&pathtbl_resize_lock);
-                oldtbl = mesh_paths;
-                newtbl = mesh_table_grow(mesh_paths);
-                if (!newtbl) {
-                        write_unlock(&pathtbl_resize_lock);
-                        return 0;
-                }
-                rcu_assign_pointer(mesh_paths, newtbl);
-                write_unlock(&pathtbl_resize_lock);
-
-                synchronize_rcu();
-                mesh_table_free(oldtbl, false);
+                set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
+                ieee80211_queue_work(&local->hw, &ifmsh->work);
         }
         return 0;
 
@@ -278,9 +329,46 @@ err_path_alloc:
         return err;
 }
 
+void mesh_mpath_table_grow(void)
+{
+        struct mesh_table *oldtbl, *newtbl;
+
+        write_lock(&pathtbl_resize_lock);
+        oldtbl = mesh_paths;
+        newtbl = mesh_table_grow(mesh_paths);
+        if (!newtbl) {
+                write_unlock(&pathtbl_resize_lock);
+                return;
+        }
+        rcu_assign_pointer(mesh_paths, newtbl);
+        write_unlock(&pathtbl_resize_lock);
+
+        synchronize_rcu();
+        mesh_table_free(oldtbl, false);
+}
+
+void mesh_mpp_table_grow(void)
+{
+        struct mesh_table *oldtbl, *newtbl;
+
+        write_lock(&pathtbl_resize_lock);
+        oldtbl = mpp_paths;
+        newtbl = mesh_table_grow(mpp_paths);
+        if (!newtbl) {
+                write_unlock(&pathtbl_resize_lock);
+                return;
+        }
+        rcu_assign_pointer(mpp_paths, newtbl);
+        write_unlock(&pathtbl_resize_lock);
+
+        synchronize_rcu();
+        mesh_table_free(oldtbl, false);
+}
 
 int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
 {
+        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+        struct ieee80211_local *local = sdata->local;
         struct mesh_path *mpath, *new_mpath;
         struct mpath_node *node, *new_node;
         struct hlist_head *bucket;
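
Both helpers follow the classic RCU replace-then-reclaim sequence:
rcu_assign_pointer() publishes the new table so new lookups see it,
synchronize_rcu() waits for every reader that may still hold the old
pointer to finish, and only then is the old table freed. free_leafs is
false because the copied nodes in the new table still reference the
same mesh_path leaves; only the old bucket array and its nodes go. The
reader side this protects looks roughly like the following (assumed
shape, consistent with the resize-lock comment earlier in the file):

rcu_read_lock();
tbl = rcu_dereference(mesh_paths);  /* old or new table; both are valid */
/* ... hash dst, walk tbl->hash_buckets[hash_idx] ... */
rcu_read_unlock();                  /* grace period ends once all readers exit */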
@@ -289,8 +377,6 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
         int err = 0;
         u32 hash_idx;
 
-        might_sleep();
-
         if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0)
                 /* never add ourselves as neighbours */
                 return -ENOTSUPP;
@@ -299,11 +385,11 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
                 return -ENOTSUPP;
 
         err = -ENOMEM;
-        new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL);
+        new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
         if (!new_mpath)
                 goto err_path_alloc;
 
-        new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
+        new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
         if (!new_node)
                 goto err_node_alloc;
 
@@ -337,20 +423,8 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
         spin_unlock(&mpp_paths->hashwlock[hash_idx]);
         read_unlock(&pathtbl_resize_lock);
         if (grow) {
-                struct mesh_table *oldtbl, *newtbl;
-
-                write_lock(&pathtbl_resize_lock);
-                oldtbl = mpp_paths;
-                newtbl = mesh_table_grow(mpp_paths);
-                if (!newtbl) {
-                        write_unlock(&pathtbl_resize_lock);
-                        return 0;
-                }
-                rcu_assign_pointer(mpp_paths, newtbl);
-                write_unlock(&pathtbl_resize_lock);
-
-                synchronize_rcu();
-                mesh_table_free(oldtbl, false);
+                set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
+                ieee80211_queue_work(&local->hw, &ifmsh->work);
         }
         return 0;
 