author     Javier Cardona <javier@cozybit.com>        2011-05-03 19:57:16 -0400
committer  John W. Linville <linville@tuxdriver.com>  2011-05-11 14:50:35 -0400
commit     9b84b80891e5e25ae21c855bb135b05274125a29
tree       1e833eb357f66aba37980b240de48671eec900a6  /net/mac80211/mesh_pathtbl.c
parent     9ca99eeca0cfe839c481f3350988e9ed94188567
mac80211: Fix locking bug on mesh path table access
The mesh and mpp path tables are accessed from softirq and workqueue
context, so non-irq locking cannot be used. Or at least that's what
PROVE_RCU seems to tell us here:
[ 431.240946] =================================
[ 431.241061] [ INFO: inconsistent lock state ]
[ 431.241061] 2.6.39-rc3-wl+ #354
[ 431.241061] ---------------------------------
[ 431.241061] inconsistent {IN-SOFTIRQ-W} -> {SOFTIRQ-ON-W} usage.
[ 431.241061] kworker/u:1/1423 [HC0[0]:SC0[0]:HE1:SE1] takes:
[ 431.241061]  (&(&newtbl->hashwlock[i])->rlock){+.?...}, at: [<c14671bf>] mesh_path_add+0x167/0x257
Signed-off-by: Javier Cardona <javier@cozybit.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
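
To illustrate the rule this patch enforces, here is a minimal, self-contained sketch; the lock and function names are hypothetical, not taken from mesh_pathtbl.c. A spinlock that is taken from softirq context must be taken with the _bh variants in process/workqueue context. Otherwise a softirq can preempt the process-context lock holder on the same CPU and spin forever on the held lock, which is the inconsistent {IN-SOFTIRQ-W} -> {SOFTIRQ-ON-W} state lockdep reports above.

#include <linux/spinlock.h>

/* Hypothetical lock shared between softirq and process context. */
static DEFINE_SPINLOCK(table_lock);

/* Runs in softirq context (e.g. the RX path); BHs are already disabled,
 * so a plain spin_lock() is sufficient here. */
static void lookup_in_softirq(void)
{
	spin_lock(&table_lock);
	/* ... walk a hash bucket ... */
	spin_unlock(&table_lock);
}

/* Runs in process context (e.g. a table-grow workqueue). A plain
 * spin_lock() here could be interrupted by the softirq above while the
 * lock is held, deadlocking on the same CPU; spin_lock_bh() disables
 * bottom halves for the duration of the critical section. */
static void resize_in_workqueue(void)
{
	spin_lock_bh(&table_lock);
	/* ... rehash entries ... */
	spin_unlock_bh(&table_lock);
}

The patch below applies that discipline to pathtbl_resize_lock and to each per-bucket hashwlock.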
Diffstat (limited to 'net/mac80211/mesh_pathtbl.c')
 net/mac80211/mesh_pathtbl.c | 54 ++++++++++++++++++++++++++--------------------------
 1 file changed, 28 insertions(+), 26 deletions(-)
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index c1a2bf2aa2de..c7cb3cc083df 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -55,12 +55,12 @@ void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
 
 	mesh_hash = tbl->hash_buckets;
 	for (i = 0; i <= tbl->hash_mask; i++) {
-		spin_lock(&tbl->hashwlock[i]);
+		spin_lock_bh(&tbl->hashwlock[i]);
 		hlist_for_each_safe(p, q, &mesh_hash[i]) {
 			tbl->free_node(p, free_leafs);
 			atomic_dec(&tbl->entries);
 		}
-		spin_unlock(&tbl->hashwlock[i]);
+		spin_unlock_bh(&tbl->hashwlock[i]);
 	}
 	__mesh_table_free(tbl);
 }
@@ -274,7 +274,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
 	if (!new_node)
 		goto err_node_alloc;
 
-	read_lock(&pathtbl_resize_lock);
+	read_lock_bh(&pathtbl_resize_lock);
 	memcpy(new_mpath->dst, dst, ETH_ALEN);
 	new_mpath->sdata = sdata;
 	new_mpath->flags = 0;
@@ -289,7 +289,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
 	hash_idx = mesh_table_hash(dst, sdata, mesh_paths);
 	bucket = &mesh_paths->hash_buckets[hash_idx];
 
-	spin_lock(&mesh_paths->hashwlock[hash_idx]);
+	spin_lock_bh(&mesh_paths->hashwlock[hash_idx]);
 
 	err = -EEXIST;
 	hlist_for_each_entry(node, n, bucket, list) {
@@ -305,8 +305,8 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
 
 	mesh_paths_generation++;
 
-	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
-	read_unlock(&pathtbl_resize_lock);
+	spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]);
+	read_unlock_bh(&pathtbl_resize_lock);
 	if (grow) {
 		set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
 		ieee80211_queue_work(&local->hw, &sdata->work);
@@ -314,8 +314,8 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
 	return 0;
 
 err_exists:
-	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
-	read_unlock(&pathtbl_resize_lock);
+	spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]);
+	read_unlock_bh(&pathtbl_resize_lock);
 	kfree(new_node);
 err_node_alloc:
 	kfree(new_mpath);
@@ -332,16 +332,17 @@ void mesh_mpath_table_grow(void)
 	newtbl = mesh_table_alloc(rcu_dereference(mesh_paths)->size_order + 1);
 	if (!newtbl)
 		return;
-	write_lock(&pathtbl_resize_lock);
+	write_lock_bh(&pathtbl_resize_lock);
 	oldtbl = mesh_paths;
 	if (mesh_table_grow(mesh_paths, newtbl) < 0) {
+		rcu_read_unlock();
 		__mesh_table_free(newtbl);
-		write_unlock(&pathtbl_resize_lock);
+		write_unlock_bh(&pathtbl_resize_lock);
 		return;
 	}
 	rcu_read_unlock();
 	rcu_assign_pointer(mesh_paths, newtbl);
-	write_unlock(&pathtbl_resize_lock);
+	write_unlock_bh(&pathtbl_resize_lock);
 
 	synchronize_rcu();
 	mesh_table_free(oldtbl, false);
@@ -355,16 +356,17 @@ void mesh_mpp_table_grow(void)
 	newtbl = mesh_table_alloc(rcu_dereference(mpp_paths)->size_order + 1);
 	if (!newtbl)
 		return;
-	write_lock(&pathtbl_resize_lock);
+	write_lock_bh(&pathtbl_resize_lock);
 	oldtbl = mpp_paths;
 	if (mesh_table_grow(mpp_paths, newtbl) < 0) {
+		rcu_read_unlock();
 		__mesh_table_free(newtbl);
-		write_unlock(&pathtbl_resize_lock);
+		write_unlock_bh(&pathtbl_resize_lock);
 		return;
 	}
 	rcu_read_unlock();
 	rcu_assign_pointer(mpp_paths, newtbl);
-	write_unlock(&pathtbl_resize_lock);
+	write_unlock_bh(&pathtbl_resize_lock);
 
 	synchronize_rcu();
 	mesh_table_free(oldtbl, false);
@@ -398,7 +400,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
 	if (!new_node)
 		goto err_node_alloc;
 
-	read_lock(&pathtbl_resize_lock);
+	read_lock_bh(&pathtbl_resize_lock);
 	memcpy(new_mpath->dst, dst, ETH_ALEN);
 	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
 	new_mpath->sdata = sdata;
@@ -411,7 +413,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
 	hash_idx = mesh_table_hash(dst, sdata, mpp_paths);
 	bucket = &mpp_paths->hash_buckets[hash_idx];
 
-	spin_lock(&mpp_paths->hashwlock[hash_idx]);
+	spin_lock_bh(&mpp_paths->hashwlock[hash_idx]);
 
 	err = -EEXIST;
 	hlist_for_each_entry(node, n, bucket, list) {
@@ -425,8 +427,8 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
 	    mpp_paths->mean_chain_len * (mpp_paths->hash_mask + 1))
 		grow = 1;
 
-	spin_unlock(&mpp_paths->hashwlock[hash_idx]);
-	read_unlock(&pathtbl_resize_lock);
+	spin_unlock_bh(&mpp_paths->hashwlock[hash_idx]);
+	read_unlock_bh(&pathtbl_resize_lock);
 	if (grow) {
 		set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
 		ieee80211_queue_work(&local->hw, &sdata->work);
@@ -434,8 +436,8 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
 	return 0;
 
 err_exists:
-	spin_unlock(&mpp_paths->hashwlock[hash_idx]);
-	read_unlock(&pathtbl_resize_lock);
+	spin_unlock_bh(&mpp_paths->hashwlock[hash_idx]);
+	read_unlock_bh(&pathtbl_resize_lock);
 	kfree(new_node);
 err_node_alloc:
 	kfree(new_mpath);
@@ -548,11 +550,11 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
 	int hash_idx;
 	int err = 0;
 
-	read_lock(&pathtbl_resize_lock);
+	read_lock_bh(&pathtbl_resize_lock);
 	hash_idx = mesh_table_hash(addr, sdata, mesh_paths);
 	bucket = &mesh_paths->hash_buckets[hash_idx];
 
-	spin_lock(&mesh_paths->hashwlock[hash_idx]);
+	spin_lock_bh(&mesh_paths->hashwlock[hash_idx]);
 	hlist_for_each_entry(node, n, bucket, list) {
 		mpath = node->mpath;
 		if (mpath->sdata == sdata &&
@@ -570,8 +572,8 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
 	err = -ENXIO;
 enddel:
 	mesh_paths_generation++;
-	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
-	read_unlock(&pathtbl_resize_lock);
+	spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]);
+	read_unlock_bh(&pathtbl_resize_lock);
 	return err;
 }
 
@@ -723,7 +725,7 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
 	struct hlist_node *p;
 	int i;
 
-	read_lock(&pathtbl_resize_lock);
+	read_lock_bh(&pathtbl_resize_lock);
 	for_each_mesh_entry(mesh_paths, p, node, i) {
 		if (node->mpath->sdata != sdata)
 			continue;
@@ -738,7 +740,7 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
 		} else
 			spin_unlock_bh(&mpath->state_lock);
 	}
-	read_unlock(&pathtbl_resize_lock);
+	read_unlock_bh(&pathtbl_resize_lock);
 }
 
 void mesh_pathtbl_unregister(void)