Diffstat (limited to 'net/mac80211/mesh_pathtbl.c')
-rw-r--r--	net/mac80211/mesh_pathtbl.c	34
1 files changed, 18 insertions, 16 deletions
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index dc51669e67d8..be1361b5f7ad 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -348,7 +348,7 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, u8 *dst,
 	hlist_for_each_entry_rcu(node, n, bucket, list) {
 		mpath = node->mpath;
 		if (mpath->sdata == sdata &&
-		    memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
+		    compare_ether_addr(dst, mpath->dst) == 0) {
 			if (MPATH_EXPIRED(mpath)) {
 				spin_lock_bh(&mpath->state_lock);
 				mpath->flags &= ~MESH_PATH_ACTIVE;
@@ -523,7 +523,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
 	int err = 0;
 	u32 hash_idx;
 
-	if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
+	if (compare_ether_addr(dst, sdata->vif.addr) == 0)
 		/* never add ourselves as neighbours */
 		return -ENOTSUPP;
 
@@ -559,12 +559,13 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
 	hash_idx = mesh_table_hash(dst, sdata, tbl);
 	bucket = &tbl->hash_buckets[hash_idx];
 
-	spin_lock_bh(&tbl->hashwlock[hash_idx]);
+	spin_lock(&tbl->hashwlock[hash_idx]);
 
 	err = -EEXIST;
 	hlist_for_each_entry(node, n, bucket, list) {
 		mpath = node->mpath;
-		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
+		if (mpath->sdata == sdata &&
+		    compare_ether_addr(dst, mpath->dst) == 0)
 			goto err_exists;
 	}
 
@@ -575,7 +576,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
 
 	mesh_paths_generation++;
 
-	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
+	spin_unlock(&tbl->hashwlock[hash_idx]);
 	read_unlock_bh(&pathtbl_resize_lock);
 	if (grow) {
 		set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
@@ -584,7 +585,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
 	return 0;
 
 err_exists:
-	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
+	spin_unlock(&tbl->hashwlock[hash_idx]);
 	read_unlock_bh(&pathtbl_resize_lock);
 	kfree(new_node);
 err_node_alloc:
@@ -655,7 +656,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
 	int err = 0;
 	u32 hash_idx;
 
-	if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
+	if (compare_ether_addr(dst, sdata->vif.addr) == 0)
 		/* never add ourselves as neighbours */
 		return -ENOTSUPP;
 
@@ -687,12 +688,13 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
 	hash_idx = mesh_table_hash(dst, sdata, tbl);
 	bucket = &tbl->hash_buckets[hash_idx];
 
-	spin_lock_bh(&tbl->hashwlock[hash_idx]);
+	spin_lock(&tbl->hashwlock[hash_idx]);
 
 	err = -EEXIST;
 	hlist_for_each_entry(node, n, bucket, list) {
 		mpath = node->mpath;
-		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
+		if (mpath->sdata == sdata &&
+		    compare_ether_addr(dst, mpath->dst) == 0)
 			goto err_exists;
 	}
 
@@ -701,7 +703,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
 		tbl->mean_chain_len * (tbl->hash_mask + 1))
 		grow = 1;
 
-	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
+	spin_unlock(&tbl->hashwlock[hash_idx]);
 	read_unlock_bh(&pathtbl_resize_lock);
 	if (grow) {
 		set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
@@ -710,7 +712,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
 	return 0;
 
 err_exists:
-	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
+	spin_unlock(&tbl->hashwlock[hash_idx]);
 	read_unlock_bh(&pathtbl_resize_lock);
 	kfree(new_node);
 err_node_alloc:
@@ -809,9 +811,9 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
 	for_each_mesh_entry(tbl, p, node, i) {
 		mpath = node->mpath;
 		if (rcu_dereference(mpath->next_hop) == sta) {
-			spin_lock_bh(&tbl->hashwlock[i]);
+			spin_lock(&tbl->hashwlock[i]);
 			__mesh_path_del(tbl, node);
-			spin_unlock_bh(&tbl->hashwlock[i]);
+			spin_unlock(&tbl->hashwlock[i]);
 		}
 	}
 	read_unlock_bh(&pathtbl_resize_lock);
@@ -882,11 +884,11 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
 	hash_idx = mesh_table_hash(addr, sdata, tbl);
 	bucket = &tbl->hash_buckets[hash_idx];
 
-	spin_lock_bh(&tbl->hashwlock[hash_idx]);
+	spin_lock(&tbl->hashwlock[hash_idx]);
 	hlist_for_each_entry(node, n, bucket, list) {
 		mpath = node->mpath;
 		if (mpath->sdata == sdata &&
-		    memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
+		    compare_ether_addr(addr, mpath->dst) == 0) {
 			__mesh_path_del(tbl, node);
 			goto enddel;
 		}
@@ -895,7 +897,7 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
 	err = -ENXIO;
 enddel:
 	mesh_paths_generation++;
-	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
+	spin_unlock(&tbl->hashwlock[hash_idx]);
 	read_unlock_bh(&pathtbl_resize_lock);
 	return err;
 }
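
For reference, compare_ether_addr() returns 0 when two ETH_ALEN-byte addresses match, so "compare_ether_addr(a, b) == 0" tests the same condition as "memcmp(a, b, ETH_ALEN) == 0" at the call sites touched above. Below is a minimal standalone C sketch, assuming a local stand-in for the kernel helper (modelled loosely on the one declared in <linux/etherdevice.h>, not the in-tree implementation), showing the two checks agree.

/*
 * Standalone sketch: local stand-in for compare_ether_addr()
 * (assumption: modelled loosely on <linux/etherdevice.h>, not the
 * kernel's actual implementation). Shows that "compare_ether_addr(a, b) == 0"
 * and "memcmp(a, b, ETH_ALEN) == 0" agree on MAC-address equality.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ETH_ALEN 6

static unsigned int compare_ether_addr(const uint8_t *addr1, const uint8_t *addr2)
{
	/* 0 means "equal", matching how the call sites in the diff use it */
	return memcmp(addr1, addr2, ETH_ALEN) != 0;
}

int main(void)
{
	uint8_t dst[ETH_ALEN]   = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };
	uint8_t same[ETH_ALEN]  = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };
	uint8_t other[ETH_ALEN] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcd };

	printf("memcmp:             equal=%d different=%d\n",
	       memcmp(dst, same, ETH_ALEN) == 0,
	       memcmp(dst, other, ETH_ALEN) == 0);
	printf("compare_ether_addr: equal=%d different=%d\n",
	       compare_ether_addr(dst, same) == 0,
	       compare_ether_addr(dst, other) == 0);
	return 0;
}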