about summary refs log tree commit diff stats
path: root/net/mac80211/mesh_pathtbl.c
diff options
context:
space:
mode:
authorJohn W. Linville <linville@tuxdriver.com>2011-05-24 16:47:54 -0400
committerJohn W. Linville <linville@tuxdriver.com>2011-05-24 16:47:54 -0400
commit31ec97d9cebac804814de298592648f7c18d8281 (patch)
treef725fcce0d5a9d6d7bd64b777de0a44e71773d0e /net/mac80211/mesh_pathtbl.c
parent557eed603159b4e007c57d97fad1333ecebd3c2e (diff)
parentdaf8cf608d57a0b9f22276036e420cc82cf6ab4f (diff)
Merge ssh://master.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next-2.6 into for-davem
Diffstat (limited to 'net/mac80211/mesh_pathtbl.c')
-rw-r--r--net/mac80211/mesh_pathtbl.c204
1 file changed, 128 insertions(+), 76 deletions(-)
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 83ce48e31913..0d2faacc3e87 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -36,8 +36,8 @@ struct mpath_node {
36 struct mesh_path *mpath; 36 struct mesh_path *mpath;
37}; 37};
38 38
39static struct mesh_table *mesh_paths; 39static struct mesh_table __rcu *mesh_paths;
40static struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */ 40static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */
41 41
42int mesh_paths_generation; 42int mesh_paths_generation;
43 43
@@ -48,17 +48,40 @@ int mesh_paths_generation;
48static DEFINE_RWLOCK(pathtbl_resize_lock); 48static DEFINE_RWLOCK(pathtbl_resize_lock);
49 49
50 50
51static inline struct mesh_table *resize_dereference_mesh_paths(void)
52{
53 return rcu_dereference_protected(mesh_paths,
54 lockdep_is_held(&pathtbl_resize_lock));
55}
56
57static inline struct mesh_table *resize_dereference_mpp_paths(void)
58{
59 return rcu_dereference_protected(mpp_paths,
60 lockdep_is_held(&pathtbl_resize_lock));
61}
62
63/*
64 * CAREFUL -- "tbl" must not be an expression,
65 * in particular not an rcu_dereference(), since
66 * it's used twice. So it is illegal to do
67 * for_each_mesh_entry(rcu_dereference(...), ...)
68 */
69#define for_each_mesh_entry(tbl, p, node, i) \
70 for (i = 0; i <= tbl->hash_mask; i++) \
71 hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list)
72
73
51static struct mesh_table *mesh_table_alloc(int size_order) 74static struct mesh_table *mesh_table_alloc(int size_order)
52{ 75{
53 int i; 76 int i;
54 struct mesh_table *newtbl; 77 struct mesh_table *newtbl;
55 78
56 newtbl = kmalloc(sizeof(struct mesh_table), GFP_KERNEL); 79 newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
57 if (!newtbl) 80 if (!newtbl)
58 return NULL; 81 return NULL;
59 82
60 newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) * 83 newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
61 (1 << size_order), GFP_KERNEL); 84 (1 << size_order), GFP_ATOMIC);
62 85
63 if (!newtbl->hash_buckets) { 86 if (!newtbl->hash_buckets) {
64 kfree(newtbl); 87 kfree(newtbl);
@@ -66,7 +89,7 @@ static struct mesh_table *mesh_table_alloc(int size_order)
66 } 89 }
67 90
68 newtbl->hashwlock = kmalloc(sizeof(spinlock_t) * 91 newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
69 (1 << size_order), GFP_KERNEL); 92 (1 << size_order), GFP_ATOMIC);
70 if (!newtbl->hashwlock) { 93 if (!newtbl->hashwlock) {
71 kfree(newtbl->hash_buckets); 94 kfree(newtbl->hash_buckets);
72 kfree(newtbl); 95 kfree(newtbl);
@@ -258,12 +281,13 @@ struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
258 */ 281 */
259struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata) 282struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
260{ 283{
284 struct mesh_table *tbl = rcu_dereference(mesh_paths);
261 struct mpath_node *node; 285 struct mpath_node *node;
262 struct hlist_node *p; 286 struct hlist_node *p;
263 int i; 287 int i;
264 int j = 0; 288 int j = 0;
265 289
266 for_each_mesh_entry(mesh_paths, p, node, i) { 290 for_each_mesh_entry(tbl, p, node, i) {
267 if (sdata && node->mpath->sdata != sdata) 291 if (sdata && node->mpath->sdata != sdata)
268 continue; 292 continue;
269 if (j++ == idx) { 293 if (j++ == idx) {
@@ -293,6 +317,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
293{ 317{
294 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 318 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
295 struct ieee80211_local *local = sdata->local; 319 struct ieee80211_local *local = sdata->local;
320 struct mesh_table *tbl;
296 struct mesh_path *mpath, *new_mpath; 321 struct mesh_path *mpath, *new_mpath;
297 struct mpath_node *node, *new_node; 322 struct mpath_node *node, *new_node;
298 struct hlist_head *bucket; 323 struct hlist_head *bucket;
@@ -332,10 +357,12 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
332 spin_lock_init(&new_mpath->state_lock); 357 spin_lock_init(&new_mpath->state_lock);
333 init_timer(&new_mpath->timer); 358 init_timer(&new_mpath->timer);
334 359
335 hash_idx = mesh_table_hash(dst, sdata, mesh_paths); 360 tbl = resize_dereference_mesh_paths();
336 bucket = &mesh_paths->hash_buckets[hash_idx]; 361
362 hash_idx = mesh_table_hash(dst, sdata, tbl);
363 bucket = &tbl->hash_buckets[hash_idx];
337 364
338 spin_lock_bh(&mesh_paths->hashwlock[hash_idx]); 365 spin_lock_bh(&tbl->hashwlock[hash_idx]);
339 366
340 err = -EEXIST; 367 err = -EEXIST;
341 hlist_for_each_entry(node, n, bucket, list) { 368 hlist_for_each_entry(node, n, bucket, list) {
@@ -345,13 +372,13 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
345 } 372 }
346 373
347 hlist_add_head_rcu(&new_node->list, bucket); 374 hlist_add_head_rcu(&new_node->list, bucket);
348 if (atomic_inc_return(&mesh_paths->entries) >= 375 if (atomic_inc_return(&tbl->entries) >=
349 mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1)) 376 tbl->mean_chain_len * (tbl->hash_mask + 1))
350 grow = 1; 377 grow = 1;
351 378
352 mesh_paths_generation++; 379 mesh_paths_generation++;
353 380
354 spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]); 381 spin_unlock_bh(&tbl->hashwlock[hash_idx]);
355 read_unlock_bh(&pathtbl_resize_lock); 382 read_unlock_bh(&pathtbl_resize_lock);
356 if (grow) { 383 if (grow) {
357 set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags); 384 set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
@@ -360,7 +387,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
360 return 0; 387 return 0;
361 388
362err_exists: 389err_exists:
363 spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]); 390 spin_unlock_bh(&tbl->hashwlock[hash_idx]);
364 read_unlock_bh(&pathtbl_resize_lock); 391 read_unlock_bh(&pathtbl_resize_lock);
365 kfree(new_node); 392 kfree(new_node);
366err_node_alloc: 393err_node_alloc:
@@ -370,58 +397,59 @@ err_path_alloc:
370 return err; 397 return err;
371} 398}
372 399
400static void mesh_table_free_rcu(struct rcu_head *rcu)
401{
402 struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);
403
404 mesh_table_free(tbl, false);
405}
406
373void mesh_mpath_table_grow(void) 407void mesh_mpath_table_grow(void)
374{ 408{
375 struct mesh_table *oldtbl, *newtbl; 409 struct mesh_table *oldtbl, *newtbl;
376 410
377 rcu_read_lock();
378 newtbl = mesh_table_alloc(rcu_dereference(mesh_paths)->size_order + 1);
379 if (!newtbl)
380 return;
381 write_lock_bh(&pathtbl_resize_lock); 411 write_lock_bh(&pathtbl_resize_lock);
382 oldtbl = mesh_paths; 412 oldtbl = resize_dereference_mesh_paths();
383 if (mesh_table_grow(mesh_paths, newtbl) < 0) { 413 newtbl = mesh_table_alloc(oldtbl->size_order + 1);
384 rcu_read_unlock(); 414 if (!newtbl)
415 goto out;
416 if (mesh_table_grow(oldtbl, newtbl) < 0) {
385 __mesh_table_free(newtbl); 417 __mesh_table_free(newtbl);
386 write_unlock_bh(&pathtbl_resize_lock); 418 goto out;
387 return;
388 } 419 }
389 rcu_read_unlock();
390 rcu_assign_pointer(mesh_paths, newtbl); 420 rcu_assign_pointer(mesh_paths, newtbl);
391 write_unlock_bh(&pathtbl_resize_lock);
392 421
393 synchronize_rcu(); 422 call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);
394 mesh_table_free(oldtbl, false); 423
424 out:
425 write_unlock_bh(&pathtbl_resize_lock);
395} 426}
396 427
397void mesh_mpp_table_grow(void) 428void mesh_mpp_table_grow(void)
398{ 429{
399 struct mesh_table *oldtbl, *newtbl; 430 struct mesh_table *oldtbl, *newtbl;
400 431
401 rcu_read_lock();
402 newtbl = mesh_table_alloc(rcu_dereference(mpp_paths)->size_order + 1);
403 if (!newtbl)
404 return;
405 write_lock_bh(&pathtbl_resize_lock); 432 write_lock_bh(&pathtbl_resize_lock);
406 oldtbl = mpp_paths; 433 oldtbl = resize_dereference_mpp_paths();
407 if (mesh_table_grow(mpp_paths, newtbl) < 0) { 434 newtbl = mesh_table_alloc(oldtbl->size_order + 1);
408 rcu_read_unlock(); 435 if (!newtbl)
436 goto out;
437 if (mesh_table_grow(oldtbl, newtbl) < 0) {
409 __mesh_table_free(newtbl); 438 __mesh_table_free(newtbl);
410 write_unlock_bh(&pathtbl_resize_lock); 439 goto out;
411 return;
412 } 440 }
413 rcu_read_unlock();
414 rcu_assign_pointer(mpp_paths, newtbl); 441 rcu_assign_pointer(mpp_paths, newtbl);
415 write_unlock_bh(&pathtbl_resize_lock); 442 call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);
416 443
417 synchronize_rcu(); 444 out:
418 mesh_table_free(oldtbl, false); 445 write_unlock_bh(&pathtbl_resize_lock);
419} 446}
420 447
421int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata) 448int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
422{ 449{
423 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 450 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
424 struct ieee80211_local *local = sdata->local; 451 struct ieee80211_local *local = sdata->local;
452 struct mesh_table *tbl;
425 struct mesh_path *mpath, *new_mpath; 453 struct mesh_path *mpath, *new_mpath;
426 struct mpath_node *node, *new_node; 454 struct mpath_node *node, *new_node;
427 struct hlist_head *bucket; 455 struct hlist_head *bucket;
@@ -456,10 +484,12 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
456 new_mpath->exp_time = jiffies; 484 new_mpath->exp_time = jiffies;
457 spin_lock_init(&new_mpath->state_lock); 485 spin_lock_init(&new_mpath->state_lock);
458 486
459 hash_idx = mesh_table_hash(dst, sdata, mpp_paths); 487 tbl = resize_dereference_mpp_paths();
460 bucket = &mpp_paths->hash_buckets[hash_idx];
461 488
462 spin_lock_bh(&mpp_paths->hashwlock[hash_idx]); 489 hash_idx = mesh_table_hash(dst, sdata, tbl);
490 bucket = &tbl->hash_buckets[hash_idx];
491
492 spin_lock_bh(&tbl->hashwlock[hash_idx]);
463 493
464 err = -EEXIST; 494 err = -EEXIST;
465 hlist_for_each_entry(node, n, bucket, list) { 495 hlist_for_each_entry(node, n, bucket, list) {
@@ -469,11 +499,11 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
469 } 499 }
470 500
471 hlist_add_head_rcu(&new_node->list, bucket); 501 hlist_add_head_rcu(&new_node->list, bucket);
472 if (atomic_inc_return(&mpp_paths->entries) >= 502 if (atomic_inc_return(&tbl->entries) >=
473 mpp_paths->mean_chain_len * (mpp_paths->hash_mask + 1)) 503 tbl->mean_chain_len * (tbl->hash_mask + 1))
474 grow = 1; 504 grow = 1;
475 505
476 spin_unlock_bh(&mpp_paths->hashwlock[hash_idx]); 506 spin_unlock_bh(&tbl->hashwlock[hash_idx]);
477 read_unlock_bh(&pathtbl_resize_lock); 507 read_unlock_bh(&pathtbl_resize_lock);
478 if (grow) { 508 if (grow) {
479 set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags); 509 set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
@@ -482,7 +512,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
482 return 0; 512 return 0;
483 513
484err_exists: 514err_exists:
485 spin_unlock_bh(&mpp_paths->hashwlock[hash_idx]); 515 spin_unlock_bh(&tbl->hashwlock[hash_idx]);
486 read_unlock_bh(&pathtbl_resize_lock); 516 read_unlock_bh(&pathtbl_resize_lock);
487 kfree(new_node); 517 kfree(new_node);
488err_node_alloc: 518err_node_alloc:
@@ -502,6 +532,7 @@ err_path_alloc:
502 */ 532 */
503void mesh_plink_broken(struct sta_info *sta) 533void mesh_plink_broken(struct sta_info *sta)
504{ 534{
535 struct mesh_table *tbl;
505 static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 536 static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
506 struct mesh_path *mpath; 537 struct mesh_path *mpath;
507 struct mpath_node *node; 538 struct mpath_node *node;
@@ -510,10 +541,11 @@ void mesh_plink_broken(struct sta_info *sta)
510 int i; 541 int i;
511 542
512 rcu_read_lock(); 543 rcu_read_lock();
513 for_each_mesh_entry(mesh_paths, p, node, i) { 544 tbl = rcu_dereference(mesh_paths);
545 for_each_mesh_entry(tbl, p, node, i) {
514 mpath = node->mpath; 546 mpath = node->mpath;
515 spin_lock_bh(&mpath->state_lock); 547 spin_lock_bh(&mpath->state_lock);
516 if (mpath->next_hop == sta && 548 if (rcu_dereference(mpath->next_hop) == sta &&
517 mpath->flags & MESH_PATH_ACTIVE && 549 mpath->flags & MESH_PATH_ACTIVE &&
518 !(mpath->flags & MESH_PATH_FIXED)) { 550 !(mpath->flags & MESH_PATH_FIXED)) {
519 mpath->flags &= ~MESH_PATH_ACTIVE; 551 mpath->flags &= ~MESH_PATH_ACTIVE;
@@ -542,30 +574,38 @@ void mesh_plink_broken(struct sta_info *sta)
542 */ 574 */
543void mesh_path_flush_by_nexthop(struct sta_info *sta) 575void mesh_path_flush_by_nexthop(struct sta_info *sta)
544{ 576{
577 struct mesh_table *tbl;
545 struct mesh_path *mpath; 578 struct mesh_path *mpath;
546 struct mpath_node *node; 579 struct mpath_node *node;
547 struct hlist_node *p; 580 struct hlist_node *p;
548 int i; 581 int i;
549 582
550 for_each_mesh_entry(mesh_paths, p, node, i) { 583 rcu_read_lock();
584 tbl = rcu_dereference(mesh_paths);
585 for_each_mesh_entry(tbl, p, node, i) {
551 mpath = node->mpath; 586 mpath = node->mpath;
552 if (mpath->next_hop == sta) 587 if (rcu_dereference(mpath->next_hop) == sta)
553 mesh_path_del(mpath->dst, mpath->sdata); 588 mesh_path_del(mpath->dst, mpath->sdata);
554 } 589 }
590 rcu_read_unlock();
555} 591}
556 592
557void mesh_path_flush(struct ieee80211_sub_if_data *sdata) 593void mesh_path_flush(struct ieee80211_sub_if_data *sdata)
558{ 594{
595 struct mesh_table *tbl;
559 struct mesh_path *mpath; 596 struct mesh_path *mpath;
560 struct mpath_node *node; 597 struct mpath_node *node;
561 struct hlist_node *p; 598 struct hlist_node *p;
562 int i; 599 int i;
563 600
564 for_each_mesh_entry(mesh_paths, p, node, i) { 601 rcu_read_lock();
602 tbl = rcu_dereference(mesh_paths);
603 for_each_mesh_entry(tbl, p, node, i) {
565 mpath = node->mpath; 604 mpath = node->mpath;
566 if (mpath->sdata == sdata) 605 if (mpath->sdata == sdata)
567 mesh_path_del(mpath->dst, mpath->sdata); 606 mesh_path_del(mpath->dst, mpath->sdata);
568 } 607 }
608 rcu_read_unlock();
569} 609}
570 610
571static void mesh_path_node_reclaim(struct rcu_head *rp) 611static void mesh_path_node_reclaim(struct rcu_head *rp)
@@ -589,6 +629,7 @@ static void mesh_path_node_reclaim(struct rcu_head *rp)
589 */ 629 */
590int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata) 630int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
591{ 631{
632 struct mesh_table *tbl;
592 struct mesh_path *mpath; 633 struct mesh_path *mpath;
593 struct mpath_node *node; 634 struct mpath_node *node;
594 struct hlist_head *bucket; 635 struct hlist_head *bucket;
@@ -597,19 +638,20 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
597 int err = 0; 638 int err = 0;
598 639
599 read_lock_bh(&pathtbl_resize_lock); 640 read_lock_bh(&pathtbl_resize_lock);
600 hash_idx = mesh_table_hash(addr, sdata, mesh_paths); 641 tbl = resize_dereference_mesh_paths();
601 bucket = &mesh_paths->hash_buckets[hash_idx]; 642 hash_idx = mesh_table_hash(addr, sdata, tbl);
643 bucket = &tbl->hash_buckets[hash_idx];
602 644
603 spin_lock_bh(&mesh_paths->hashwlock[hash_idx]); 645 spin_lock_bh(&tbl->hashwlock[hash_idx]);
604 hlist_for_each_entry(node, n, bucket, list) { 646 hlist_for_each_entry(node, n, bucket, list) {
605 mpath = node->mpath; 647 mpath = node->mpath;
606 if (mpath->sdata == sdata && 648 if (mpath->sdata == sdata &&
607 memcmp(addr, mpath->dst, ETH_ALEN) == 0) { 649 memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
608 spin_lock_bh(&mpath->state_lock); 650 spin_lock_bh(&mpath->state_lock);
609 mpath->flags |= MESH_PATH_RESOLVING; 651 mpath->flags |= MESH_PATH_RESOLVING;
610 hlist_del_rcu(&node->list); 652 hlist_del_rcu(&node->list);
611 call_rcu(&node->rcu, mesh_path_node_reclaim); 653 call_rcu(&node->rcu, mesh_path_node_reclaim);
612 atomic_dec(&mesh_paths->entries); 654 atomic_dec(&tbl->entries);
613 spin_unlock_bh(&mpath->state_lock); 655 spin_unlock_bh(&mpath->state_lock);
614 goto enddel; 656 goto enddel;
615 } 657 }
@@ -618,7 +660,7 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
618 err = -ENXIO; 660 err = -ENXIO;
619enddel: 661enddel:
620 mesh_paths_generation++; 662 mesh_paths_generation++;
621 spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]); 663 spin_unlock_bh(&tbl->hashwlock[hash_idx]);
622 read_unlock_bh(&pathtbl_resize_lock); 664 read_unlock_bh(&pathtbl_resize_lock);
623 return err; 665 return err;
624} 666}
@@ -719,8 +761,10 @@ static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
719 struct mpath_node *node = hlist_entry(p, struct mpath_node, list); 761 struct mpath_node *node = hlist_entry(p, struct mpath_node, list);
720 mpath = node->mpath; 762 mpath = node->mpath;
721 hlist_del_rcu(p); 763 hlist_del_rcu(p);
722 if (free_leafs) 764 if (free_leafs) {
765 del_timer_sync(&mpath->timer);
723 kfree(mpath); 766 kfree(mpath);
767 }
724 kfree(node); 768 kfree(node);
725} 769}
726 770
@@ -745,52 +789,60 @@ static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
745 789
746int mesh_pathtbl_init(void) 790int mesh_pathtbl_init(void)
747{ 791{
748 mesh_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); 792 struct mesh_table *tbl_path, *tbl_mpp;
749 if (!mesh_paths) 793
794 tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
795 if (!tbl_path)
750 return -ENOMEM; 796 return -ENOMEM;
751 mesh_paths->free_node = &mesh_path_node_free; 797 tbl_path->free_node = &mesh_path_node_free;
752 mesh_paths->copy_node = &mesh_path_node_copy; 798 tbl_path->copy_node = &mesh_path_node_copy;
753 mesh_paths->mean_chain_len = MEAN_CHAIN_LEN; 799 tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
754 800
755 mpp_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); 801 tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
756 if (!mpp_paths) { 802 if (!tbl_mpp) {
757 mesh_table_free(mesh_paths, true); 803 mesh_table_free(tbl_path, true);
758 return -ENOMEM; 804 return -ENOMEM;
759 } 805 }
760 mpp_paths->free_node = &mesh_path_node_free; 806 tbl_mpp->free_node = &mesh_path_node_free;
761 mpp_paths->copy_node = &mesh_path_node_copy; 807 tbl_mpp->copy_node = &mesh_path_node_copy;
762 mpp_paths->mean_chain_len = MEAN_CHAIN_LEN; 808 tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
809
810 /* Need no locking since this is during init */
811 RCU_INIT_POINTER(mesh_paths, tbl_path);
812 RCU_INIT_POINTER(mpp_paths, tbl_mpp);
763 813
764 return 0; 814 return 0;
765} 815}
766 816
767void mesh_path_expire(struct ieee80211_sub_if_data *sdata) 817void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
768{ 818{
819 struct mesh_table *tbl;
769 struct mesh_path *mpath; 820 struct mesh_path *mpath;
770 struct mpath_node *node; 821 struct mpath_node *node;
771 struct hlist_node *p; 822 struct hlist_node *p;
772 int i; 823 int i;
773 824
774 read_lock_bh(&pathtbl_resize_lock); 825 rcu_read_lock();
775 for_each_mesh_entry(mesh_paths, p, node, i) { 826 tbl = rcu_dereference(mesh_paths);
827 for_each_mesh_entry(tbl, p, node, i) {
776 if (node->mpath->sdata != sdata) 828 if (node->mpath->sdata != sdata)
777 continue; 829 continue;
778 mpath = node->mpath; 830 mpath = node->mpath;
779 spin_lock_bh(&mpath->state_lock); 831 spin_lock_bh(&mpath->state_lock);
780 if ((!(mpath->flags & MESH_PATH_RESOLVING)) && 832 if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
781 (!(mpath->flags & MESH_PATH_FIXED)) && 833 (!(mpath->flags & MESH_PATH_FIXED)) &&
782 time_after(jiffies, 834 time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) {
783 mpath->exp_time + MESH_PATH_EXPIRE)) {
784 spin_unlock_bh(&mpath->state_lock); 835 spin_unlock_bh(&mpath->state_lock);
785 mesh_path_del(mpath->dst, mpath->sdata); 836 mesh_path_del(mpath->dst, mpath->sdata);
786 } else 837 } else
787 spin_unlock_bh(&mpath->state_lock); 838 spin_unlock_bh(&mpath->state_lock);
788 } 839 }
789 read_unlock_bh(&pathtbl_resize_lock); 840 rcu_read_unlock();
790} 841}
791 842
792void mesh_pathtbl_unregister(void) 843void mesh_pathtbl_unregister(void)
793{ 844{
794 mesh_table_free(mesh_paths, true); 845 /* no need for locking during exit path */
795 mesh_table_free(mpp_paths, true); 846 mesh_table_free(rcu_dereference_raw(mesh_paths), true);
847 mesh_table_free(rcu_dereference_raw(mpp_paths), true);
796} 848}