aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
authorJohannes Berg <johannes.berg@intel.com>2011-05-14 05:00:52 -0400
committerJohn W. Linville <linville@tuxdriver.com>2011-05-16 14:25:29 -0400
commit1928ecab620907a0953f811316d05f367f3f4dba (patch)
treec97ce24a7d86ad58c148ef8905c72c5705d12f4a /net
parentd07c7cf49ae7c488e778c4d668f4cc10bd2fa971 (diff)
mac80211: fix and simplify mesh locking
The locking in mesh_{mpath,mpp}_table_grow not only has an rcu_read_unlock() missing, it's also racy (though really only technically since it's invoked from a single function only) since it obtains the new size of the table without any locking, so two invocations of the function could attempt the same resize. Additionally, it uses synchronize_rcu() which is rather expensive and can be avoided trivially here. Modify the functions to only use the table lock and use call_rcu() instead of synchronize_rcu(). Signed-off-by: Johannes Berg <johannes.berg@intel.com> Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'net')
-rw-r--r--net/mac80211/mesh.h3
-rw-r--r--net/mac80211/mesh_pathtbl.c44
2 files changed, 25 insertions, 22 deletions
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index e7c5fddb4804..eb733c0d61aa 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -120,6 +120,7 @@ struct mesh_path {
120 * buckets 120 * buckets
121 * @mean_chain_len: maximum average length for the hash buckets' list, if it is 121 * @mean_chain_len: maximum average length for the hash buckets' list, if it is
122 * reached, the table will grow 122 * reached, the table will grow
123 * @rcu_head: RCU head to free the table
123 */ 124 */
124struct mesh_table { 125struct mesh_table {
125 /* Number of buckets will be 2^N */ 126 /* Number of buckets will be 2^N */
@@ -132,6 +133,8 @@ struct mesh_table {
132 int (*copy_node) (struct hlist_node *p, struct mesh_table *newtbl); 133 int (*copy_node) (struct hlist_node *p, struct mesh_table *newtbl);
133 int size_order; 134 int size_order;
134 int mean_chain_len; 135 int mean_chain_len;
136
137 struct rcu_head rcu_head;
135}; 138};
136 139
137/* Recent multicast cache */ 140/* Recent multicast cache */
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index f775202552e5..74021365b8c8 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -370,52 +370,52 @@ err_path_alloc:
370 return err; 370 return err;
371} 371}
372 372
373static void mesh_table_free_rcu(struct rcu_head *rcu)
374{
375 struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);
376
377 mesh_table_free(tbl, false);
378}
379
373void mesh_mpath_table_grow(void) 380void mesh_mpath_table_grow(void)
374{ 381{
375 struct mesh_table *oldtbl, *newtbl; 382 struct mesh_table *oldtbl, *newtbl;
376 383
377 rcu_read_lock();
378 newtbl = mesh_table_alloc(rcu_dereference(mesh_paths)->size_order + 1);
379 if (!newtbl)
380 return;
381 write_lock_bh(&pathtbl_resize_lock); 384 write_lock_bh(&pathtbl_resize_lock);
385 newtbl = mesh_table_alloc(mesh_paths->size_order + 1);
386 if (!newtbl)
387 goto out;
382 oldtbl = mesh_paths; 388 oldtbl = mesh_paths;
383 if (mesh_table_grow(mesh_paths, newtbl) < 0) { 389 if (mesh_table_grow(mesh_paths, newtbl) < 0) {
384 rcu_read_unlock();
385 __mesh_table_free(newtbl); 390 __mesh_table_free(newtbl);
386 write_unlock_bh(&pathtbl_resize_lock); 391 goto out;
387 return;
388 } 392 }
389 rcu_read_unlock();
390 rcu_assign_pointer(mesh_paths, newtbl); 393 rcu_assign_pointer(mesh_paths, newtbl);
391 write_unlock_bh(&pathtbl_resize_lock);
392 394
393 synchronize_rcu(); 395 call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);
394 mesh_table_free(oldtbl, false); 396
397 out:
398 write_unlock_bh(&pathtbl_resize_lock);
395} 399}
396 400
397void mesh_mpp_table_grow(void) 401void mesh_mpp_table_grow(void)
398{ 402{
399 struct mesh_table *oldtbl, *newtbl; 403 struct mesh_table *oldtbl, *newtbl;
400 404
401 rcu_read_lock();
402 newtbl = mesh_table_alloc(rcu_dereference(mpp_paths)->size_order + 1);
403 if (!newtbl)
404 return;
405 write_lock_bh(&pathtbl_resize_lock); 405 write_lock_bh(&pathtbl_resize_lock);
406 newtbl = mesh_table_alloc(mpp_paths->size_order + 1);
407 if (!newtbl)
408 goto out;
406 oldtbl = mpp_paths; 409 oldtbl = mpp_paths;
407 if (mesh_table_grow(mpp_paths, newtbl) < 0) { 410 if (mesh_table_grow(mpp_paths, newtbl) < 0) {
408 rcu_read_unlock();
409 __mesh_table_free(newtbl); 411 __mesh_table_free(newtbl);
410 write_unlock_bh(&pathtbl_resize_lock); 412 goto out;
411 return;
412 } 413 }
413 rcu_read_unlock();
414 rcu_assign_pointer(mpp_paths, newtbl); 414 rcu_assign_pointer(mpp_paths, newtbl);
415 write_unlock_bh(&pathtbl_resize_lock); 415 call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);
416 416
417 synchronize_rcu(); 417 out:
418 mesh_table_free(oldtbl, false); 418 write_unlock_bh(&pathtbl_resize_lock);
419} 419}
420 420
421int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata) 421int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)