aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJohannes Berg <johannes.berg@intel.com>2011-05-14 05:56:16 -0400
committerJohn W. Linville <linville@tuxdriver.com>2011-05-16 14:25:29 -0400
commit349eb8cf45aadd35836fdfde75b3265a01b2aaa1 (patch)
tree596eede64614dcf99967ddea15d301a9fb5e1dd3
parent1928ecab620907a0953f811316d05f367f3f4dba (diff)
mac80211: annotate and fix RCU in mesh code
This adds proper RCU annotations to the mesh path table code, and fixes a number of bugs in the code that I found while checking the sparse warnings I got as a result of the annotations.

Some things like the changes in mesh_path_add() or mesh_pathtbl_init() only serve to shut up sparse, but other changes like the changes surrounding the for_each_mesh_entry() macro fix real RCU bugs in the code.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
-rw-r--r--net/mac80211/mesh.h4
-rw-r--r--net/mac80211/mesh_pathtbl.c154
2 files changed, 102 insertions(+), 56 deletions(-)
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index eb733c0d61aa..249e733362e7 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -289,10 +289,6 @@ static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
289 return sdata->u.mesh.mesh_pp_id == IEEE80211_PATH_PROTOCOL_HWMP; 289 return sdata->u.mesh.mesh_pp_id == IEEE80211_PATH_PROTOCOL_HWMP;
290} 290}
291 291
292#define for_each_mesh_entry(x, p, node, i) \
293 for (i = 0; i <= x->hash_mask; i++) \
294 hlist_for_each_entry_rcu(node, p, &x->hash_buckets[i], list)
295
296void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local); 292void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local);
297 293
298void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata); 294void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata);
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 74021365b8c8..2bda0ac62326 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -36,8 +36,8 @@ struct mpath_node {
36 struct mesh_path *mpath; 36 struct mesh_path *mpath;
37}; 37};
38 38
39static struct mesh_table *mesh_paths; 39static struct mesh_table __rcu *mesh_paths;
40static struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */ 40static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */
41 41
42int mesh_paths_generation; 42int mesh_paths_generation;
43 43
@@ -48,6 +48,29 @@ int mesh_paths_generation;
48static DEFINE_RWLOCK(pathtbl_resize_lock); 48static DEFINE_RWLOCK(pathtbl_resize_lock);
49 49
50 50
51static inline struct mesh_table *resize_dereference_mesh_paths(void)
52{
53 return rcu_dereference_protected(mesh_paths,
54 lockdep_is_held(&pathtbl_resize_lock));
55}
56
57static inline struct mesh_table *resize_dereference_mpp_paths(void)
58{
59 return rcu_dereference_protected(mpp_paths,
60 lockdep_is_held(&pathtbl_resize_lock));
61}
62
63/*
64 * CAREFUL -- "tbl" must not be an expression,
65 * in particular not an rcu_dereference(), since
66 * it's used twice. So it is illegal to do
67 * for_each_mesh_entry(rcu_dereference(...), ...)
68 */
69#define for_each_mesh_entry(tbl, p, node, i) \
70 for (i = 0; i <= tbl->hash_mask; i++) \
71 hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list)
72
73
51static struct mesh_table *mesh_table_alloc(int size_order) 74static struct mesh_table *mesh_table_alloc(int size_order)
52{ 75{
53 int i; 76 int i;
@@ -258,12 +281,13 @@ struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
258 */ 281 */
259struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata) 282struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
260{ 283{
284 struct mesh_table *tbl = rcu_dereference(mesh_paths);
261 struct mpath_node *node; 285 struct mpath_node *node;
262 struct hlist_node *p; 286 struct hlist_node *p;
263 int i; 287 int i;
264 int j = 0; 288 int j = 0;
265 289
266 for_each_mesh_entry(mesh_paths, p, node, i) { 290 for_each_mesh_entry(tbl, p, node, i) {
267 if (sdata && node->mpath->sdata != sdata) 291 if (sdata && node->mpath->sdata != sdata)
268 continue; 292 continue;
269 if (j++ == idx) { 293 if (j++ == idx) {
@@ -293,6 +317,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
293{ 317{
294 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 318 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
295 struct ieee80211_local *local = sdata->local; 319 struct ieee80211_local *local = sdata->local;
320 struct mesh_table *tbl;
296 struct mesh_path *mpath, *new_mpath; 321 struct mesh_path *mpath, *new_mpath;
297 struct mpath_node *node, *new_node; 322 struct mpath_node *node, *new_node;
298 struct hlist_head *bucket; 323 struct hlist_head *bucket;
@@ -332,10 +357,12 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
332 spin_lock_init(&new_mpath->state_lock); 357 spin_lock_init(&new_mpath->state_lock);
333 init_timer(&new_mpath->timer); 358 init_timer(&new_mpath->timer);
334 359
335 hash_idx = mesh_table_hash(dst, sdata, mesh_paths); 360 tbl = resize_dereference_mesh_paths();
336 bucket = &mesh_paths->hash_buckets[hash_idx];
337 361
338 spin_lock_bh(&mesh_paths->hashwlock[hash_idx]); 362 hash_idx = mesh_table_hash(dst, sdata, tbl);
363 bucket = &tbl->hash_buckets[hash_idx];
364
365 spin_lock_bh(&tbl->hashwlock[hash_idx]);
339 366
340 err = -EEXIST; 367 err = -EEXIST;
341 hlist_for_each_entry(node, n, bucket, list) { 368 hlist_for_each_entry(node, n, bucket, list) {
@@ -345,13 +372,13 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
345 } 372 }
346 373
347 hlist_add_head_rcu(&new_node->list, bucket); 374 hlist_add_head_rcu(&new_node->list, bucket);
348 if (atomic_inc_return(&mesh_paths->entries) >= 375 if (atomic_inc_return(&tbl->entries) >=
349 mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1)) 376 tbl->mean_chain_len * (tbl->hash_mask + 1))
350 grow = 1; 377 grow = 1;
351 378
352 mesh_paths_generation++; 379 mesh_paths_generation++;
353 380
354 spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]); 381 spin_unlock_bh(&tbl->hashwlock[hash_idx]);
355 read_unlock_bh(&pathtbl_resize_lock); 382 read_unlock_bh(&pathtbl_resize_lock);
356 if (grow) { 383 if (grow) {
357 set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags); 384 set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
@@ -360,7 +387,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
360 return 0; 387 return 0;
361 388
362err_exists: 389err_exists:
363 spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]); 390 spin_unlock_bh(&tbl->hashwlock[hash_idx]);
364 read_unlock_bh(&pathtbl_resize_lock); 391 read_unlock_bh(&pathtbl_resize_lock);
365 kfree(new_node); 392 kfree(new_node);
366err_node_alloc: 393err_node_alloc:
@@ -382,11 +409,11 @@ void mesh_mpath_table_grow(void)
382 struct mesh_table *oldtbl, *newtbl; 409 struct mesh_table *oldtbl, *newtbl;
383 410
384 write_lock_bh(&pathtbl_resize_lock); 411 write_lock_bh(&pathtbl_resize_lock);
385 newtbl = mesh_table_alloc(mesh_paths->size_order + 1); 412 oldtbl = resize_dereference_mesh_paths();
413 newtbl = mesh_table_alloc(oldtbl->size_order + 1);
386 if (!newtbl) 414 if (!newtbl)
387 goto out; 415 goto out;
388 oldtbl = mesh_paths; 416 if (mesh_table_grow(oldtbl, newtbl) < 0) {
389 if (mesh_table_grow(mesh_paths, newtbl) < 0) {
390 __mesh_table_free(newtbl); 417 __mesh_table_free(newtbl);
391 goto out; 418 goto out;
392 } 419 }
@@ -403,11 +430,11 @@ void mesh_mpp_table_grow(void)
403 struct mesh_table *oldtbl, *newtbl; 430 struct mesh_table *oldtbl, *newtbl;
404 431
405 write_lock_bh(&pathtbl_resize_lock); 432 write_lock_bh(&pathtbl_resize_lock);
406 newtbl = mesh_table_alloc(mpp_paths->size_order + 1); 433 oldtbl = resize_dereference_mpp_paths();
434 newtbl = mesh_table_alloc(oldtbl->size_order + 1);
407 if (!newtbl) 435 if (!newtbl)
408 goto out; 436 goto out;
409 oldtbl = mpp_paths; 437 if (mesh_table_grow(oldtbl, newtbl) < 0) {
410 if (mesh_table_grow(mpp_paths, newtbl) < 0) {
411 __mesh_table_free(newtbl); 438 __mesh_table_free(newtbl);
412 goto out; 439 goto out;
413 } 440 }
@@ -422,6 +449,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
422{ 449{
423 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 450 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
424 struct ieee80211_local *local = sdata->local; 451 struct ieee80211_local *local = sdata->local;
452 struct mesh_table *tbl;
425 struct mesh_path *mpath, *new_mpath; 453 struct mesh_path *mpath, *new_mpath;
426 struct mpath_node *node, *new_node; 454 struct mpath_node *node, *new_node;
427 struct hlist_head *bucket; 455 struct hlist_head *bucket;
@@ -456,10 +484,12 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
456 new_mpath->exp_time = jiffies; 484 new_mpath->exp_time = jiffies;
457 spin_lock_init(&new_mpath->state_lock); 485 spin_lock_init(&new_mpath->state_lock);
458 486
459 hash_idx = mesh_table_hash(dst, sdata, mpp_paths); 487 tbl = resize_dereference_mpp_paths();
460 bucket = &mpp_paths->hash_buckets[hash_idx];
461 488
462 spin_lock_bh(&mpp_paths->hashwlock[hash_idx]); 489 hash_idx = mesh_table_hash(dst, sdata, tbl);
490 bucket = &tbl->hash_buckets[hash_idx];
491
492 spin_lock_bh(&tbl->hashwlock[hash_idx]);
463 493
464 err = -EEXIST; 494 err = -EEXIST;
465 hlist_for_each_entry(node, n, bucket, list) { 495 hlist_for_each_entry(node, n, bucket, list) {
@@ -469,11 +499,11 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
469 } 499 }
470 500
471 hlist_add_head_rcu(&new_node->list, bucket); 501 hlist_add_head_rcu(&new_node->list, bucket);
472 if (atomic_inc_return(&mpp_paths->entries) >= 502 if (atomic_inc_return(&tbl->entries) >=
473 mpp_paths->mean_chain_len * (mpp_paths->hash_mask + 1)) 503 tbl->mean_chain_len * (tbl->hash_mask + 1))
474 grow = 1; 504 grow = 1;
475 505
476 spin_unlock_bh(&mpp_paths->hashwlock[hash_idx]); 506 spin_unlock_bh(&tbl->hashwlock[hash_idx]);
477 read_unlock_bh(&pathtbl_resize_lock); 507 read_unlock_bh(&pathtbl_resize_lock);
478 if (grow) { 508 if (grow) {
479 set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags); 509 set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
@@ -482,7 +512,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
482 return 0; 512 return 0;
483 513
484err_exists: 514err_exists:
485 spin_unlock_bh(&mpp_paths->hashwlock[hash_idx]); 515 spin_unlock_bh(&tbl->hashwlock[hash_idx]);
486 read_unlock_bh(&pathtbl_resize_lock); 516 read_unlock_bh(&pathtbl_resize_lock);
487 kfree(new_node); 517 kfree(new_node);
488err_node_alloc: 518err_node_alloc:
@@ -502,6 +532,7 @@ err_path_alloc:
502 */ 532 */
503void mesh_plink_broken(struct sta_info *sta) 533void mesh_plink_broken(struct sta_info *sta)
504{ 534{
535 struct mesh_table *tbl;
505 static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 536 static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
506 struct mesh_path *mpath; 537 struct mesh_path *mpath;
507 struct mpath_node *node; 538 struct mpath_node *node;
@@ -510,10 +541,11 @@ void mesh_plink_broken(struct sta_info *sta)
510 int i; 541 int i;
511 542
512 rcu_read_lock(); 543 rcu_read_lock();
513 for_each_mesh_entry(mesh_paths, p, node, i) { 544 tbl = rcu_dereference(mesh_paths);
545 for_each_mesh_entry(tbl, p, node, i) {
514 mpath = node->mpath; 546 mpath = node->mpath;
515 spin_lock_bh(&mpath->state_lock); 547 spin_lock_bh(&mpath->state_lock);
516 if (mpath->next_hop == sta && 548 if (rcu_dereference(mpath->next_hop) == sta &&
517 mpath->flags & MESH_PATH_ACTIVE && 549 mpath->flags & MESH_PATH_ACTIVE &&
518 !(mpath->flags & MESH_PATH_FIXED)) { 550 !(mpath->flags & MESH_PATH_FIXED)) {
519 mpath->flags &= ~MESH_PATH_ACTIVE; 551 mpath->flags &= ~MESH_PATH_ACTIVE;
@@ -542,30 +574,38 @@ void mesh_plink_broken(struct sta_info *sta)
542 */ 574 */
543void mesh_path_flush_by_nexthop(struct sta_info *sta) 575void mesh_path_flush_by_nexthop(struct sta_info *sta)
544{ 576{
577 struct mesh_table *tbl;
545 struct mesh_path *mpath; 578 struct mesh_path *mpath;
546 struct mpath_node *node; 579 struct mpath_node *node;
547 struct hlist_node *p; 580 struct hlist_node *p;
548 int i; 581 int i;
549 582
550 for_each_mesh_entry(mesh_paths, p, node, i) { 583 rcu_read_lock();
584 tbl = rcu_dereference(mesh_paths);
585 for_each_mesh_entry(tbl, p, node, i) {
551 mpath = node->mpath; 586 mpath = node->mpath;
552 if (mpath->next_hop == sta) 587 if (rcu_dereference(mpath->next_hop) == sta)
553 mesh_path_del(mpath->dst, mpath->sdata); 588 mesh_path_del(mpath->dst, mpath->sdata);
554 } 589 }
590 rcu_read_unlock();
555} 591}
556 592
557void mesh_path_flush(struct ieee80211_sub_if_data *sdata) 593void mesh_path_flush(struct ieee80211_sub_if_data *sdata)
558{ 594{
595 struct mesh_table *tbl;
559 struct mesh_path *mpath; 596 struct mesh_path *mpath;
560 struct mpath_node *node; 597 struct mpath_node *node;
561 struct hlist_node *p; 598 struct hlist_node *p;
562 int i; 599 int i;
563 600
564 for_each_mesh_entry(mesh_paths, p, node, i) { 601 rcu_read_lock();
602 tbl = rcu_dereference(mesh_paths);
603 for_each_mesh_entry(tbl, p, node, i) {
565 mpath = node->mpath; 604 mpath = node->mpath;
566 if (mpath->sdata == sdata) 605 if (mpath->sdata == sdata)
567 mesh_path_del(mpath->dst, mpath->sdata); 606 mesh_path_del(mpath->dst, mpath->sdata);
568 } 607 }
608 rcu_read_unlock();
569} 609}
570 610
571static void mesh_path_node_reclaim(struct rcu_head *rp) 611static void mesh_path_node_reclaim(struct rcu_head *rp)
@@ -589,6 +629,7 @@ static void mesh_path_node_reclaim(struct rcu_head *rp)
589 */ 629 */
590int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata) 630int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
591{ 631{
632 struct mesh_table *tbl;
592 struct mesh_path *mpath; 633 struct mesh_path *mpath;
593 struct mpath_node *node; 634 struct mpath_node *node;
594 struct hlist_head *bucket; 635 struct hlist_head *bucket;
@@ -597,19 +638,20 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
597 int err = 0; 638 int err = 0;
598 639
599 read_lock_bh(&pathtbl_resize_lock); 640 read_lock_bh(&pathtbl_resize_lock);
600 hash_idx = mesh_table_hash(addr, sdata, mesh_paths); 641 tbl = resize_dereference_mesh_paths();
601 bucket = &mesh_paths->hash_buckets[hash_idx]; 642 hash_idx = mesh_table_hash(addr, sdata, tbl);
643 bucket = &tbl->hash_buckets[hash_idx];
602 644
603 spin_lock_bh(&mesh_paths->hashwlock[hash_idx]); 645 spin_lock_bh(&tbl->hashwlock[hash_idx]);
604 hlist_for_each_entry(node, n, bucket, list) { 646 hlist_for_each_entry(node, n, bucket, list) {
605 mpath = node->mpath; 647 mpath = node->mpath;
606 if (mpath->sdata == sdata && 648 if (mpath->sdata == sdata &&
607 memcmp(addr, mpath->dst, ETH_ALEN) == 0) { 649 memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
608 spin_lock_bh(&mpath->state_lock); 650 spin_lock_bh(&mpath->state_lock);
609 mpath->flags |= MESH_PATH_RESOLVING; 651 mpath->flags |= MESH_PATH_RESOLVING;
610 hlist_del_rcu(&node->list); 652 hlist_del_rcu(&node->list);
611 call_rcu(&node->rcu, mesh_path_node_reclaim); 653 call_rcu(&node->rcu, mesh_path_node_reclaim);
612 atomic_dec(&mesh_paths->entries); 654 atomic_dec(&tbl->entries);
613 spin_unlock_bh(&mpath->state_lock); 655 spin_unlock_bh(&mpath->state_lock);
614 goto enddel; 656 goto enddel;
615 } 657 }
@@ -618,7 +660,7 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
618 err = -ENXIO; 660 err = -ENXIO;
619enddel: 661enddel:
620 mesh_paths_generation++; 662 mesh_paths_generation++;
621 spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]); 663 spin_unlock_bh(&tbl->hashwlock[hash_idx]);
622 read_unlock_bh(&pathtbl_resize_lock); 664 read_unlock_bh(&pathtbl_resize_lock);
623 return err; 665 return err;
624} 666}
@@ -747,52 +789,60 @@ static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
747 789
748int mesh_pathtbl_init(void) 790int mesh_pathtbl_init(void)
749{ 791{
750 mesh_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); 792 struct mesh_table *tbl_path, *tbl_mpp;
751 if (!mesh_paths) 793
794 tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
795 if (!tbl_path)
752 return -ENOMEM; 796 return -ENOMEM;
753 mesh_paths->free_node = &mesh_path_node_free; 797 tbl_path->free_node = &mesh_path_node_free;
754 mesh_paths->copy_node = &mesh_path_node_copy; 798 tbl_path->copy_node = &mesh_path_node_copy;
755 mesh_paths->mean_chain_len = MEAN_CHAIN_LEN; 799 tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
756 800
757 mpp_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); 801 tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
758 if (!mpp_paths) { 802 if (!tbl_mpp) {
759 mesh_table_free(mesh_paths, true); 803 mesh_table_free(tbl_path, true);
760 return -ENOMEM; 804 return -ENOMEM;
761 } 805 }
762 mpp_paths->free_node = &mesh_path_node_free; 806 tbl_mpp->free_node = &mesh_path_node_free;
763 mpp_paths->copy_node = &mesh_path_node_copy; 807 tbl_mpp->copy_node = &mesh_path_node_copy;
764 mpp_paths->mean_chain_len = MEAN_CHAIN_LEN; 808 tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
809
810 /* Need no locking since this is during init */
811 RCU_INIT_POINTER(mesh_paths, tbl_path);
812 RCU_INIT_POINTER(mpp_paths, tbl_mpp);
765 813
766 return 0; 814 return 0;
767} 815}
768 816
769void mesh_path_expire(struct ieee80211_sub_if_data *sdata) 817void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
770{ 818{
819 struct mesh_table *tbl;
771 struct mesh_path *mpath; 820 struct mesh_path *mpath;
772 struct mpath_node *node; 821 struct mpath_node *node;
773 struct hlist_node *p; 822 struct hlist_node *p;
774 int i; 823 int i;
775 824
776 read_lock_bh(&pathtbl_resize_lock); 825 rcu_read_lock();
777 for_each_mesh_entry(mesh_paths, p, node, i) { 826 tbl = rcu_dereference(mesh_paths);
827 for_each_mesh_entry(tbl, p, node, i) {
778 if (node->mpath->sdata != sdata) 828 if (node->mpath->sdata != sdata)
779 continue; 829 continue;
780 mpath = node->mpath; 830 mpath = node->mpath;
781 spin_lock_bh(&mpath->state_lock); 831 spin_lock_bh(&mpath->state_lock);
782 if ((!(mpath->flags & MESH_PATH_RESOLVING)) && 832 if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
783 (!(mpath->flags & MESH_PATH_FIXED)) && 833 (!(mpath->flags & MESH_PATH_FIXED)) &&
784 time_after(jiffies, 834 time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) {
785 mpath->exp_time + MESH_PATH_EXPIRE)) {
786 spin_unlock_bh(&mpath->state_lock); 835 spin_unlock_bh(&mpath->state_lock);
787 mesh_path_del(mpath->dst, mpath->sdata); 836 mesh_path_del(mpath->dst, mpath->sdata);
788 } else 837 } else
789 spin_unlock_bh(&mpath->state_lock); 838 spin_unlock_bh(&mpath->state_lock);
790 } 839 }
791 read_unlock_bh(&pathtbl_resize_lock); 840 rcu_read_unlock();
792} 841}
793 842
794void mesh_pathtbl_unregister(void) 843void mesh_pathtbl_unregister(void)
795{ 844{
796 mesh_table_free(mesh_paths, true); 845 /* no need for locking during exit path */
797 mesh_table_free(mpp_paths, true); 846 mesh_table_free(rcu_dereference_raw(mesh_paths), true);
847 mesh_table_free(rcu_dereference_raw(mpp_paths), true);
798} 848}