diff options
author | Bob Copeland <me@bobcopeland.com> | 2016-02-28 20:03:56 -0500 |
---|---|---|
committer | Johannes Berg <johannes.berg@intel.com> | 2016-04-05 04:56:30 -0400 |
commit | 2bdaf386f99c4a82788812e583ff59c6714ae4d6 (patch) | |
tree | 903aff9c23f928cc3c1f5811ac4ff129d0206546 /net/mac80211 | |
parent | 12880d169471fb14c46d6f323f31127702a6d5e6 (diff) |
mac80211: mesh: move path tables into if_mesh
The mesh path and mesh gate hashtables are global, containing
all of the mpaths for every mesh interface, but the paths are
all tied logically to a single interface. The common case is
just a single mesh interface, so optimize for that by moving
the global hashtable into the per-interface struct.
Doing so allows us to drop sdata pointer comparisons inside
the lookups and also saves a few bytes of BSS and data.
Signed-off-by: Bob Copeland <me@bobcopeland.com>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Diffstat (limited to 'net/mac80211')
-rw-r--r-- | net/mac80211/cfg.c | 4 | ||||
-rw-r--r-- | net/mac80211/ieee80211_i.h | 12 | ||||
-rw-r--r-- | net/mac80211/mesh.c | 10 | ||||
-rw-r--r-- | net/mac80211/mesh.h | 10 | ||||
-rw-r--r-- | net/mac80211/mesh_pathtbl.c | 181 | ||||
-rw-r--r-- | net/mac80211/tx.c | 2 |
6 files changed, 104 insertions, 115 deletions
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index fe1704c4e8fb..b37adb60c9cb 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -1499,7 +1499,7 @@ static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop, | |||
1499 | 1499 | ||
1500 | memset(pinfo, 0, sizeof(*pinfo)); | 1500 | memset(pinfo, 0, sizeof(*pinfo)); |
1501 | 1501 | ||
1502 | pinfo->generation = mesh_paths_generation; | 1502 | pinfo->generation = mpath->sdata->u.mesh.mesh_paths_generation; |
1503 | 1503 | ||
1504 | pinfo->filled = MPATH_INFO_FRAME_QLEN | | 1504 | pinfo->filled = MPATH_INFO_FRAME_QLEN | |
1505 | MPATH_INFO_SN | | 1505 | MPATH_INFO_SN | |
@@ -1577,7 +1577,7 @@ static void mpp_set_pinfo(struct mesh_path *mpath, u8 *mpp, | |||
1577 | memset(pinfo, 0, sizeof(*pinfo)); | 1577 | memset(pinfo, 0, sizeof(*pinfo)); |
1578 | memcpy(mpp, mpath->mpp, ETH_ALEN); | 1578 | memcpy(mpp, mpath->mpp, ETH_ALEN); |
1579 | 1579 | ||
1580 | pinfo->generation = mpp_paths_generation; | 1580 | pinfo->generation = mpath->sdata->u.mesh.mpp_paths_generation; |
1581 | } | 1581 | } |
1582 | 1582 | ||
1583 | static int ieee80211_get_mpp(struct wiphy *wiphy, struct net_device *dev, | 1583 | static int ieee80211_get_mpp(struct wiphy *wiphy, struct net_device *dev, |
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 804575ff7af5..db7f0dbebc4b 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h | |||
@@ -696,6 +696,18 @@ struct ieee80211_if_mesh { | |||
696 | 696 | ||
697 | /* offset from skb->data while building IE */ | 697 | /* offset from skb->data while building IE */ |
698 | int meshconf_offset; | 698 | int meshconf_offset; |
699 | |||
700 | struct mesh_table __rcu *mesh_paths; | ||
701 | struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */ | ||
702 | int mesh_paths_generation; | ||
703 | int mpp_paths_generation; | ||
704 | |||
705 | /* Protects assignment of the mesh_paths/mpp_paths table | ||
706 | * pointer for resize against reading it for add/delete | ||
707 | * of individual paths. Pure readers (lookups) just use | ||
708 | * RCU. | ||
709 | */ | ||
710 | rwlock_t pathtbl_resize_lock; | ||
699 | }; | 711 | }; |
700 | 712 | ||
701 | #ifdef CONFIG_MAC80211_MESH | 713 | #ifdef CONFIG_MAC80211_MESH |
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index d32cefcb63b0..c92af2a7714d 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c | |||
@@ -25,7 +25,6 @@ bool mesh_action_is_path_sel(struct ieee80211_mgmt *mgmt) | |||
25 | 25 | ||
26 | void ieee80211s_init(void) | 26 | void ieee80211s_init(void) |
27 | { | 27 | { |
28 | mesh_pathtbl_init(); | ||
29 | mesh_allocated = 1; | 28 | mesh_allocated = 1; |
30 | rm_cache = kmem_cache_create("mesh_rmc", sizeof(struct rmc_entry), | 29 | rm_cache = kmem_cache_create("mesh_rmc", sizeof(struct rmc_entry), |
31 | 0, 0, NULL); | 30 | 0, 0, NULL); |
@@ -35,7 +34,6 @@ void ieee80211s_stop(void) | |||
35 | { | 34 | { |
36 | if (!mesh_allocated) | 35 | if (!mesh_allocated) |
37 | return; | 36 | return; |
38 | mesh_pathtbl_unregister(); | ||
39 | kmem_cache_destroy(rm_cache); | 37 | kmem_cache_destroy(rm_cache); |
40 | } | 38 | } |
41 | 39 | ||
@@ -902,6 +900,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata) | |||
902 | /* flush STAs and mpaths on this iface */ | 900 | /* flush STAs and mpaths on this iface */ |
903 | sta_info_flush(sdata); | 901 | sta_info_flush(sdata); |
904 | mesh_path_flush_by_iface(sdata); | 902 | mesh_path_flush_by_iface(sdata); |
903 | mesh_pathtbl_unregister(sdata); | ||
905 | 904 | ||
906 | /* free all potentially still buffered group-addressed frames */ | 905 | /* free all potentially still buffered group-addressed frames */ |
907 | local->total_ps_buffered -= skb_queue_len(&ifmsh->ps.bc_buf); | 906 | local->total_ps_buffered -= skb_queue_len(&ifmsh->ps.bc_buf); |
@@ -1349,10 +1348,10 @@ void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata) | |||
1349 | mesh_path_start_discovery(sdata); | 1348 | mesh_path_start_discovery(sdata); |
1350 | 1349 | ||
1351 | if (test_and_clear_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags)) | 1350 | if (test_and_clear_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags)) |
1352 | mesh_mpath_table_grow(); | 1351 | mesh_mpath_table_grow(sdata); |
1353 | 1352 | ||
1354 | if (test_and_clear_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags)) | 1353 | if (test_and_clear_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags)) |
1355 | mesh_mpp_table_grow(); | 1354 | mesh_mpp_table_grow(sdata); |
1356 | 1355 | ||
1357 | if (test_and_clear_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags)) | 1356 | if (test_and_clear_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags)) |
1358 | ieee80211_mesh_housekeeping(sdata); | 1357 | ieee80211_mesh_housekeeping(sdata); |
@@ -1388,6 +1387,9 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata) | |||
1388 | /* Allocate all mesh structures when creating the first mesh interface. */ | 1387 | /* Allocate all mesh structures when creating the first mesh interface. */ |
1389 | if (!mesh_allocated) | 1388 | if (!mesh_allocated) |
1390 | ieee80211s_init(); | 1389 | ieee80211s_init(); |
1390 | |||
1391 | mesh_pathtbl_init(sdata); | ||
1392 | |||
1391 | setup_timer(&ifmsh->mesh_path_timer, | 1393 | setup_timer(&ifmsh->mesh_path_timer, |
1392 | ieee80211_mesh_path_timer, | 1394 | ieee80211_mesh_path_timer, |
1393 | (unsigned long) sdata); | 1395 | (unsigned long) sdata); |
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index 87c017a3b1ce..601992b6cd8a 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h | |||
@@ -300,8 +300,8 @@ void mesh_sta_cleanup(struct sta_info *sta); | |||
300 | 300 | ||
301 | /* Private interfaces */ | 301 | /* Private interfaces */ |
302 | /* Mesh tables */ | 302 | /* Mesh tables */ |
303 | void mesh_mpath_table_grow(void); | 303 | void mesh_mpath_table_grow(struct ieee80211_sub_if_data *sdata); |
304 | void mesh_mpp_table_grow(void); | 304 | void mesh_mpp_table_grow(struct ieee80211_sub_if_data *sdata); |
305 | /* Mesh paths */ | 305 | /* Mesh paths */ |
306 | int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata, | 306 | int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata, |
307 | u8 ttl, const u8 *target, u32 target_sn, | 307 | u8 ttl, const u8 *target, u32 target_sn, |
@@ -309,8 +309,8 @@ int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata, | |||
309 | void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta); | 309 | void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta); |
310 | void mesh_path_flush_pending(struct mesh_path *mpath); | 310 | void mesh_path_flush_pending(struct mesh_path *mpath); |
311 | void mesh_path_tx_pending(struct mesh_path *mpath); | 311 | void mesh_path_tx_pending(struct mesh_path *mpath); |
312 | int mesh_pathtbl_init(void); | 312 | int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata); |
313 | void mesh_pathtbl_unregister(void); | 313 | void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata); |
314 | int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr); | 314 | int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr); |
315 | void mesh_path_timer(unsigned long data); | 315 | void mesh_path_timer(unsigned long data); |
316 | void mesh_path_flush_by_nexthop(struct sta_info *sta); | 316 | void mesh_path_flush_by_nexthop(struct sta_info *sta); |
@@ -319,8 +319,6 @@ void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata, | |||
319 | void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata); | 319 | void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata); |
320 | 320 | ||
321 | bool mesh_action_is_path_sel(struct ieee80211_mgmt *mgmt); | 321 | bool mesh_action_is_path_sel(struct ieee80211_mgmt *mgmt); |
322 | extern int mesh_paths_generation; | ||
323 | extern int mpp_paths_generation; | ||
324 | 322 | ||
325 | #ifdef CONFIG_MAC80211_MESH | 323 | #ifdef CONFIG_MAC80211_MESH |
326 | static inline | 324 | static inline |
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index 2ba7aa56b11c..0508b37b0471 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c | |||
@@ -40,36 +40,24 @@ struct mpath_node { | |||
40 | struct mesh_path *mpath; | 40 | struct mesh_path *mpath; |
41 | }; | 41 | }; |
42 | 42 | ||
43 | static struct mesh_table __rcu *mesh_paths; | ||
44 | static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */ | ||
45 | |||
46 | int mesh_paths_generation; | ||
47 | int mpp_paths_generation; | ||
48 | |||
49 | /* This lock will have the grow table function as writer and add / delete nodes | ||
50 | * as readers. RCU provides sufficient protection only when reading the table | ||
51 | * (i.e. doing lookups). Adding or removing nodes requires we take | ||
52 | * the read lock or we risk operating on an old table. The write lock is only | ||
53 | * needed when modifying the number of buckets in a table. | ||
54 | */ | ||
55 | static DEFINE_RWLOCK(pathtbl_resize_lock); | ||
56 | |||
57 | |||
58 | static inline struct mesh_table *resize_dereference_paths( | 43 | static inline struct mesh_table *resize_dereference_paths( |
44 | struct ieee80211_sub_if_data *sdata, | ||
59 | struct mesh_table __rcu *table) | 45 | struct mesh_table __rcu *table) |
60 | { | 46 | { |
61 | return rcu_dereference_protected(table, | 47 | return rcu_dereference_protected(table, |
62 | lockdep_is_held(&pathtbl_resize_lock)); | 48 | lockdep_is_held(&sdata->u.mesh.pathtbl_resize_lock)); |
63 | } | 49 | } |
64 | 50 | ||
65 | static inline struct mesh_table *resize_dereference_mesh_paths(void) | 51 | static inline struct mesh_table *resize_dereference_mesh_paths( |
52 | struct ieee80211_sub_if_data *sdata) | ||
66 | { | 53 | { |
67 | return resize_dereference_paths(mesh_paths); | 54 | return resize_dereference_paths(sdata, sdata->u.mesh.mesh_paths); |
68 | } | 55 | } |
69 | 56 | ||
70 | static inline struct mesh_table *resize_dereference_mpp_paths(void) | 57 | static inline struct mesh_table *resize_dereference_mpp_paths( |
58 | struct ieee80211_sub_if_data *sdata) | ||
71 | { | 59 | { |
72 | return resize_dereference_paths(mpp_paths); | 60 | return resize_dereference_paths(sdata, sdata->u.mesh.mpp_paths); |
73 | } | 61 | } |
74 | 62 | ||
75 | /* | 63 | /* |
@@ -346,8 +334,7 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst, | |||
346 | bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)]; | 334 | bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)]; |
347 | hlist_for_each_entry_rcu(node, bucket, list) { | 335 | hlist_for_each_entry_rcu(node, bucket, list) { |
348 | mpath = node->mpath; | 336 | mpath = node->mpath; |
349 | if (mpath->sdata == sdata && | 337 | if (ether_addr_equal(dst, mpath->dst)) { |
350 | ether_addr_equal(dst, mpath->dst)) { | ||
351 | if (mpath_expired(mpath)) { | 338 | if (mpath_expired(mpath)) { |
352 | spin_lock_bh(&mpath->state_lock); | 339 | spin_lock_bh(&mpath->state_lock); |
353 | mpath->flags &= ~MESH_PATH_ACTIVE; | 340 | mpath->flags &= ~MESH_PATH_ACTIVE; |
@@ -371,13 +358,15 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst, | |||
371 | struct mesh_path * | 358 | struct mesh_path * |
372 | mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst) | 359 | mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst) |
373 | { | 360 | { |
374 | return mpath_lookup(rcu_dereference(mesh_paths), dst, sdata); | 361 | return mpath_lookup(rcu_dereference(sdata->u.mesh.mesh_paths), dst, |
362 | sdata); | ||
375 | } | 363 | } |
376 | 364 | ||
377 | struct mesh_path * | 365 | struct mesh_path * |
378 | mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst) | 366 | mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst) |
379 | { | 367 | { |
380 | return mpath_lookup(rcu_dereference(mpp_paths), dst, sdata); | 368 | return mpath_lookup(rcu_dereference(sdata->u.mesh.mpp_paths), dst, |
369 | sdata); | ||
381 | } | 370 | } |
382 | 371 | ||
383 | 372 | ||
@@ -393,14 +382,12 @@ mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst) | |||
393 | struct mesh_path * | 382 | struct mesh_path * |
394 | mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx) | 383 | mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx) |
395 | { | 384 | { |
396 | struct mesh_table *tbl = rcu_dereference(mesh_paths); | 385 | struct mesh_table *tbl = rcu_dereference(sdata->u.mesh.mesh_paths); |
397 | struct mpath_node *node; | 386 | struct mpath_node *node; |
398 | int i; | 387 | int i; |
399 | int j = 0; | 388 | int j = 0; |
400 | 389 | ||
401 | for_each_mesh_entry(tbl, node, i) { | 390 | for_each_mesh_entry(tbl, node, i) { |
402 | if (sdata && node->mpath->sdata != sdata) | ||
403 | continue; | ||
404 | if (j++ == idx) { | 391 | if (j++ == idx) { |
405 | if (mpath_expired(node->mpath)) { | 392 | if (mpath_expired(node->mpath)) { |
406 | spin_lock_bh(&node->mpath->state_lock); | 393 | spin_lock_bh(&node->mpath->state_lock); |
@@ -426,14 +413,12 @@ mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx) | |||
426 | struct mesh_path * | 413 | struct mesh_path * |
427 | mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx) | 414 | mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx) |
428 | { | 415 | { |
429 | struct mesh_table *tbl = rcu_dereference(mpp_paths); | 416 | struct mesh_table *tbl = rcu_dereference(sdata->u.mesh.mpp_paths); |
430 | struct mpath_node *node; | 417 | struct mpath_node *node; |
431 | int i; | 418 | int i; |
432 | int j = 0; | 419 | int j = 0; |
433 | 420 | ||
434 | for_each_mesh_entry(tbl, node, i) { | 421 | for_each_mesh_entry(tbl, node, i) { |
435 | if (sdata && node->mpath->sdata != sdata) | ||
436 | continue; | ||
437 | if (j++ == idx) | 422 | if (j++ == idx) |
438 | return node->mpath; | 423 | return node->mpath; |
439 | } | 424 | } |
@@ -452,7 +437,7 @@ int mesh_path_add_gate(struct mesh_path *mpath) | |||
452 | int err; | 437 | int err; |
453 | 438 | ||
454 | rcu_read_lock(); | 439 | rcu_read_lock(); |
455 | tbl = rcu_dereference(mesh_paths); | 440 | tbl = rcu_dereference(mpath->sdata->u.mesh.mesh_paths); |
456 | 441 | ||
457 | hlist_for_each_entry_rcu(gate, tbl->known_gates, list) | 442 | hlist_for_each_entry_rcu(gate, tbl->known_gates, list) |
458 | if (gate->mpath == mpath) { | 443 | if (gate->mpath == mpath) { |
@@ -550,8 +535,8 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata, | |||
550 | if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0) | 535 | if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0) |
551 | return ERR_PTR(-ENOSPC); | 536 | return ERR_PTR(-ENOSPC); |
552 | 537 | ||
553 | read_lock_bh(&pathtbl_resize_lock); | 538 | read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock); |
554 | tbl = resize_dereference_mesh_paths(); | 539 | tbl = resize_dereference_mesh_paths(sdata); |
555 | 540 | ||
556 | hash_idx = mesh_table_hash(dst, sdata, tbl); | 541 | hash_idx = mesh_table_hash(dst, sdata, tbl); |
557 | bucket = &tbl->hash_buckets[hash_idx]; | 542 | bucket = &tbl->hash_buckets[hash_idx]; |
@@ -560,8 +545,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata, | |||
560 | 545 | ||
561 | hlist_for_each_entry(node, bucket, list) { | 546 | hlist_for_each_entry(node, bucket, list) { |
562 | mpath = node->mpath; | 547 | mpath = node->mpath; |
563 | if (mpath->sdata == sdata && | 548 | if (ether_addr_equal(dst, mpath->dst)) |
564 | ether_addr_equal(dst, mpath->dst)) | ||
565 | goto found; | 549 | goto found; |
566 | } | 550 | } |
567 | 551 | ||
@@ -592,7 +576,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata, | |||
592 | MEAN_CHAIN_LEN * (tbl->hash_mask + 1)) | 576 | MEAN_CHAIN_LEN * (tbl->hash_mask + 1)) |
593 | grow = 1; | 577 | grow = 1; |
594 | 578 | ||
595 | mesh_paths_generation++; | 579 | sdata->u.mesh.mesh_paths_generation++; |
596 | 580 | ||
597 | if (grow) { | 581 | if (grow) { |
598 | set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags); | 582 | set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags); |
@@ -601,7 +585,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata, | |||
601 | mpath = new_mpath; | 585 | mpath = new_mpath; |
602 | found: | 586 | found: |
603 | spin_unlock(&tbl->hashwlock[hash_idx]); | 587 | spin_unlock(&tbl->hashwlock[hash_idx]); |
604 | read_unlock_bh(&pathtbl_resize_lock); | 588 | read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock); |
605 | return mpath; | 589 | return mpath; |
606 | 590 | ||
607 | err_node_alloc: | 591 | err_node_alloc: |
@@ -609,7 +593,7 @@ err_node_alloc: | |||
609 | err_path_alloc: | 593 | err_path_alloc: |
610 | atomic_dec(&sdata->u.mesh.mpaths); | 594 | atomic_dec(&sdata->u.mesh.mpaths); |
611 | spin_unlock(&tbl->hashwlock[hash_idx]); | 595 | spin_unlock(&tbl->hashwlock[hash_idx]); |
612 | read_unlock_bh(&pathtbl_resize_lock); | 596 | read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock); |
613 | return ERR_PTR(err); | 597 | return ERR_PTR(err); |
614 | } | 598 | } |
615 | 599 | ||
@@ -620,12 +604,12 @@ static void mesh_table_free_rcu(struct rcu_head *rcu) | |||
620 | mesh_table_free(tbl, false); | 604 | mesh_table_free(tbl, false); |
621 | } | 605 | } |
622 | 606 | ||
623 | void mesh_mpath_table_grow(void) | 607 | void mesh_mpath_table_grow(struct ieee80211_sub_if_data *sdata) |
624 | { | 608 | { |
625 | struct mesh_table *oldtbl, *newtbl; | 609 | struct mesh_table *oldtbl, *newtbl; |
626 | 610 | ||
627 | write_lock_bh(&pathtbl_resize_lock); | 611 | write_lock_bh(&sdata->u.mesh.pathtbl_resize_lock); |
628 | oldtbl = resize_dereference_mesh_paths(); | 612 | oldtbl = resize_dereference_mesh_paths(sdata); |
629 | newtbl = mesh_table_alloc(oldtbl->size_order + 1); | 613 | newtbl = mesh_table_alloc(oldtbl->size_order + 1); |
630 | if (!newtbl) | 614 | if (!newtbl) |
631 | goto out; | 615 | goto out; |
@@ -633,20 +617,20 @@ void mesh_mpath_table_grow(void) | |||
633 | __mesh_table_free(newtbl); | 617 | __mesh_table_free(newtbl); |
634 | goto out; | 618 | goto out; |
635 | } | 619 | } |
636 | rcu_assign_pointer(mesh_paths, newtbl); | 620 | rcu_assign_pointer(sdata->u.mesh.mesh_paths, newtbl); |
637 | 621 | ||
638 | call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu); | 622 | call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu); |
639 | 623 | ||
640 | out: | 624 | out: |
641 | write_unlock_bh(&pathtbl_resize_lock); | 625 | write_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock); |
642 | } | 626 | } |
643 | 627 | ||
644 | void mesh_mpp_table_grow(void) | 628 | void mesh_mpp_table_grow(struct ieee80211_sub_if_data *sdata) |
645 | { | 629 | { |
646 | struct mesh_table *oldtbl, *newtbl; | 630 | struct mesh_table *oldtbl, *newtbl; |
647 | 631 | ||
648 | write_lock_bh(&pathtbl_resize_lock); | 632 | write_lock_bh(&sdata->u.mesh.pathtbl_resize_lock); |
649 | oldtbl = resize_dereference_mpp_paths(); | 633 | oldtbl = resize_dereference_mpp_paths(sdata); |
650 | newtbl = mesh_table_alloc(oldtbl->size_order + 1); | 634 | newtbl = mesh_table_alloc(oldtbl->size_order + 1); |
651 | if (!newtbl) | 635 | if (!newtbl) |
652 | goto out; | 636 | goto out; |
@@ -654,11 +638,11 @@ void mesh_mpp_table_grow(void) | |||
654 | __mesh_table_free(newtbl); | 638 | __mesh_table_free(newtbl); |
655 | goto out; | 639 | goto out; |
656 | } | 640 | } |
657 | rcu_assign_pointer(mpp_paths, newtbl); | 641 | rcu_assign_pointer(sdata->u.mesh.mpp_paths, newtbl); |
658 | call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu); | 642 | call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu); |
659 | 643 | ||
660 | out: | 644 | out: |
661 | write_unlock_bh(&pathtbl_resize_lock); | 645 | write_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock); |
662 | } | 646 | } |
663 | 647 | ||
664 | int mpp_path_add(struct ieee80211_sub_if_data *sdata, | 648 | int mpp_path_add(struct ieee80211_sub_if_data *sdata, |
@@ -690,7 +674,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata, | |||
690 | if (!new_node) | 674 | if (!new_node) |
691 | goto err_node_alloc; | 675 | goto err_node_alloc; |
692 | 676 | ||
693 | read_lock_bh(&pathtbl_resize_lock); | 677 | read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock); |
694 | memcpy(new_mpath->dst, dst, ETH_ALEN); | 678 | memcpy(new_mpath->dst, dst, ETH_ALEN); |
695 | memcpy(new_mpath->mpp, mpp, ETH_ALEN); | 679 | memcpy(new_mpath->mpp, mpp, ETH_ALEN); |
696 | new_mpath->sdata = sdata; | 680 | new_mpath->sdata = sdata; |
@@ -701,7 +685,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata, | |||
701 | new_mpath->exp_time = jiffies; | 685 | new_mpath->exp_time = jiffies; |
702 | spin_lock_init(&new_mpath->state_lock); | 686 | spin_lock_init(&new_mpath->state_lock); |
703 | 687 | ||
704 | tbl = resize_dereference_mpp_paths(); | 688 | tbl = resize_dereference_mpp_paths(sdata); |
705 | 689 | ||
706 | hash_idx = mesh_table_hash(dst, sdata, tbl); | 690 | hash_idx = mesh_table_hash(dst, sdata, tbl); |
707 | bucket = &tbl->hash_buckets[hash_idx]; | 691 | bucket = &tbl->hash_buckets[hash_idx]; |
@@ -711,8 +695,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata, | |||
711 | err = -EEXIST; | 695 | err = -EEXIST; |
712 | hlist_for_each_entry(node, bucket, list) { | 696 | hlist_for_each_entry(node, bucket, list) { |
713 | mpath = node->mpath; | 697 | mpath = node->mpath; |
714 | if (mpath->sdata == sdata && | 698 | if (ether_addr_equal(dst, mpath->dst)) |
715 | ether_addr_equal(dst, mpath->dst)) | ||
716 | goto err_exists; | 699 | goto err_exists; |
717 | } | 700 | } |
718 | 701 | ||
@@ -722,9 +705,9 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata, | |||
722 | grow = 1; | 705 | grow = 1; |
723 | 706 | ||
724 | spin_unlock(&tbl->hashwlock[hash_idx]); | 707 | spin_unlock(&tbl->hashwlock[hash_idx]); |
725 | read_unlock_bh(&pathtbl_resize_lock); | 708 | read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock); |
726 | 709 | ||
727 | mpp_paths_generation++; | 710 | sdata->u.mesh.mpp_paths_generation++; |
728 | 711 | ||
729 | if (grow) { | 712 | if (grow) { |
730 | set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags); | 713 | set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags); |
@@ -734,7 +717,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata, | |||
734 | 717 | ||
735 | err_exists: | 718 | err_exists: |
736 | spin_unlock(&tbl->hashwlock[hash_idx]); | 719 | spin_unlock(&tbl->hashwlock[hash_idx]); |
737 | read_unlock_bh(&pathtbl_resize_lock); | 720 | read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock); |
738 | kfree(new_node); | 721 | kfree(new_node); |
739 | err_node_alloc: | 722 | err_node_alloc: |
740 | kfree(new_mpath); | 723 | kfree(new_mpath); |
@@ -761,7 +744,7 @@ void mesh_plink_broken(struct sta_info *sta) | |||
761 | int i; | 744 | int i; |
762 | 745 | ||
763 | rcu_read_lock(); | 746 | rcu_read_lock(); |
764 | tbl = rcu_dereference(mesh_paths); | 747 | tbl = rcu_dereference(sdata->u.mesh.mesh_paths); |
765 | for_each_mesh_entry(tbl, node, i) { | 748 | for_each_mesh_entry(tbl, node, i) { |
766 | mpath = node->mpath; | 749 | mpath = node->mpath; |
767 | if (rcu_access_pointer(mpath->next_hop) == sta && | 750 | if (rcu_access_pointer(mpath->next_hop) == sta && |
@@ -819,14 +802,15 @@ static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node) | |||
819 | */ | 802 | */ |
820 | void mesh_path_flush_by_nexthop(struct sta_info *sta) | 803 | void mesh_path_flush_by_nexthop(struct sta_info *sta) |
821 | { | 804 | { |
805 | struct ieee80211_sub_if_data *sdata = sta->sdata; | ||
822 | struct mesh_table *tbl; | 806 | struct mesh_table *tbl; |
823 | struct mesh_path *mpath; | 807 | struct mesh_path *mpath; |
824 | struct mpath_node *node; | 808 | struct mpath_node *node; |
825 | int i; | 809 | int i; |
826 | 810 | ||
827 | rcu_read_lock(); | 811 | rcu_read_lock(); |
828 | read_lock_bh(&pathtbl_resize_lock); | 812 | read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock); |
829 | tbl = resize_dereference_mesh_paths(); | 813 | tbl = resize_dereference_mesh_paths(sdata); |
830 | for_each_mesh_entry(tbl, node, i) { | 814 | for_each_mesh_entry(tbl, node, i) { |
831 | mpath = node->mpath; | 815 | mpath = node->mpath; |
832 | if (rcu_access_pointer(mpath->next_hop) == sta) { | 816 | if (rcu_access_pointer(mpath->next_hop) == sta) { |
@@ -835,7 +819,7 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta) | |||
835 | spin_unlock(&tbl->hashwlock[i]); | 819 | spin_unlock(&tbl->hashwlock[i]); |
836 | } | 820 | } |
837 | } | 821 | } |
838 | read_unlock_bh(&pathtbl_resize_lock); | 822 | read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock); |
839 | rcu_read_unlock(); | 823 | rcu_read_unlock(); |
840 | } | 824 | } |
841 | 825 | ||
@@ -848,8 +832,8 @@ static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata, | |||
848 | int i; | 832 | int i; |
849 | 833 | ||
850 | rcu_read_lock(); | 834 | rcu_read_lock(); |
851 | read_lock_bh(&pathtbl_resize_lock); | 835 | read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock); |
852 | tbl = resize_dereference_mpp_paths(); | 836 | tbl = resize_dereference_mpp_paths(sdata); |
853 | for_each_mesh_entry(tbl, node, i) { | 837 | for_each_mesh_entry(tbl, node, i) { |
854 | mpp = node->mpath; | 838 | mpp = node->mpath; |
855 | if (ether_addr_equal(mpp->mpp, proxy)) { | 839 | if (ether_addr_equal(mpp->mpp, proxy)) { |
@@ -858,7 +842,7 @@ static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata, | |||
858 | spin_unlock(&tbl->hashwlock[i]); | 842 | spin_unlock(&tbl->hashwlock[i]); |
859 | } | 843 | } |
860 | } | 844 | } |
861 | read_unlock_bh(&pathtbl_resize_lock); | 845 | read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock); |
862 | rcu_read_unlock(); | 846 | rcu_read_unlock(); |
863 | } | 847 | } |
864 | 848 | ||
@@ -872,8 +856,6 @@ static void table_flush_by_iface(struct mesh_table *tbl, | |||
872 | WARN_ON(!rcu_read_lock_held()); | 856 | WARN_ON(!rcu_read_lock_held()); |
873 | for_each_mesh_entry(tbl, node, i) { | 857 | for_each_mesh_entry(tbl, node, i) { |
874 | mpath = node->mpath; | 858 | mpath = node->mpath; |
875 | if (mpath->sdata != sdata) | ||
876 | continue; | ||
877 | spin_lock_bh(&tbl->hashwlock[i]); | 859 | spin_lock_bh(&tbl->hashwlock[i]); |
878 | __mesh_path_del(tbl, node); | 860 | __mesh_path_del(tbl, node); |
879 | spin_unlock_bh(&tbl->hashwlock[i]); | 861 | spin_unlock_bh(&tbl->hashwlock[i]); |
@@ -893,12 +875,12 @@ void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata) | |||
893 | struct mesh_table *tbl; | 875 | struct mesh_table *tbl; |
894 | 876 | ||
895 | rcu_read_lock(); | 877 | rcu_read_lock(); |
896 | read_lock_bh(&pathtbl_resize_lock); | 878 | read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock); |
897 | tbl = resize_dereference_mesh_paths(); | 879 | tbl = resize_dereference_mesh_paths(sdata); |
898 | table_flush_by_iface(tbl, sdata); | 880 | table_flush_by_iface(tbl, sdata); |
899 | tbl = resize_dereference_mpp_paths(); | 881 | tbl = resize_dereference_mpp_paths(sdata); |
900 | table_flush_by_iface(tbl, sdata); | 882 | table_flush_by_iface(tbl, sdata); |
901 | read_unlock_bh(&pathtbl_resize_lock); | 883 | read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock); |
902 | rcu_read_unlock(); | 884 | rcu_read_unlock(); |
903 | } | 885 | } |
904 | 886 | ||
@@ -922,15 +904,14 @@ static int table_path_del(struct mesh_table __rcu *rcu_tbl, | |||
922 | int hash_idx; | 904 | int hash_idx; |
923 | int err = 0; | 905 | int err = 0; |
924 | 906 | ||
925 | tbl = resize_dereference_paths(rcu_tbl); | 907 | tbl = resize_dereference_paths(sdata, rcu_tbl); |
926 | hash_idx = mesh_table_hash(addr, sdata, tbl); | 908 | hash_idx = mesh_table_hash(addr, sdata, tbl); |
927 | bucket = &tbl->hash_buckets[hash_idx]; | 909 | bucket = &tbl->hash_buckets[hash_idx]; |
928 | 910 | ||
929 | spin_lock(&tbl->hashwlock[hash_idx]); | 911 | spin_lock(&tbl->hashwlock[hash_idx]); |
930 | hlist_for_each_entry(node, bucket, list) { | 912 | hlist_for_each_entry(node, bucket, list) { |
931 | mpath = node->mpath; | 913 | mpath = node->mpath; |
932 | if (mpath->sdata == sdata && | 914 | if (ether_addr_equal(addr, mpath->dst)) { |
933 | ether_addr_equal(addr, mpath->dst)) { | ||
934 | __mesh_path_del(tbl, node); | 915 | __mesh_path_del(tbl, node); |
935 | goto enddel; | 916 | goto enddel; |
936 | } | 917 | } |
@@ -957,10 +938,10 @@ int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr) | |||
957 | /* flush relevant mpp entries first */ | 938 | /* flush relevant mpp entries first */ |
958 | mpp_flush_by_proxy(sdata, addr); | 939 | mpp_flush_by_proxy(sdata, addr); |
959 | 940 | ||
960 | read_lock_bh(&pathtbl_resize_lock); | 941 | read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock); |
961 | err = table_path_del(mesh_paths, sdata, addr); | 942 | err = table_path_del(sdata->u.mesh.mesh_paths, sdata, addr); |
962 | mesh_paths_generation++; | 943 | sdata->u.mesh.mesh_paths_generation++; |
963 | read_unlock_bh(&pathtbl_resize_lock); | 944 | read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock); |
964 | 945 | ||
965 | return err; | 946 | return err; |
966 | } | 947 | } |
@@ -977,10 +958,10 @@ static int mpp_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr) | |||
977 | { | 958 | { |
978 | int err = 0; | 959 | int err = 0; |
979 | 960 | ||
980 | read_lock_bh(&pathtbl_resize_lock); | 961 | read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock); |
981 | err = table_path_del(mpp_paths, sdata, addr); | 962 | err = table_path_del(sdata->u.mesh.mpp_paths, sdata, addr); |
982 | mpp_paths_generation++; | 963 | sdata->u.mesh.mpp_paths_generation++; |
983 | read_unlock_bh(&pathtbl_resize_lock); | 964 | read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock); |
984 | 965 | ||
985 | return err; | 966 | return err; |
986 | } | 967 | } |
@@ -1020,7 +1001,7 @@ int mesh_path_send_to_gates(struct mesh_path *mpath) | |||
1020 | struct hlist_head *known_gates; | 1001 | struct hlist_head *known_gates; |
1021 | 1002 | ||
1022 | rcu_read_lock(); | 1003 | rcu_read_lock(); |
1023 | tbl = rcu_dereference(mesh_paths); | 1004 | tbl = rcu_dereference(sdata->u.mesh.mesh_paths); |
1024 | known_gates = tbl->known_gates; | 1005 | known_gates = tbl->known_gates; |
1025 | rcu_read_unlock(); | 1006 | rcu_read_unlock(); |
1026 | 1007 | ||
@@ -1028,9 +1009,6 @@ int mesh_path_send_to_gates(struct mesh_path *mpath) | |||
1028 | return -EHOSTUNREACH; | 1009 | return -EHOSTUNREACH; |
1029 | 1010 | ||
1030 | hlist_for_each_entry_rcu(gate, known_gates, list) { | 1011 | hlist_for_each_entry_rcu(gate, known_gates, list) { |
1031 | if (gate->mpath->sdata != sdata) | ||
1032 | continue; | ||
1033 | |||
1034 | if (gate->mpath->flags & MESH_PATH_ACTIVE) { | 1012 | if (gate->mpath->flags & MESH_PATH_ACTIVE) { |
1035 | mpath_dbg(sdata, "Forwarding to %pM\n", gate->mpath->dst); | 1013 | mpath_dbg(sdata, "Forwarding to %pM\n", gate->mpath->dst); |
1036 | mesh_path_move_to_queue(gate->mpath, from_mpath, copy); | 1014 | mesh_path_move_to_queue(gate->mpath, from_mpath, copy); |
@@ -1043,11 +1021,10 @@ int mesh_path_send_to_gates(struct mesh_path *mpath) | |||
1043 | } | 1021 | } |
1044 | } | 1022 | } |
1045 | 1023 | ||
1046 | hlist_for_each_entry_rcu(gate, known_gates, list) | 1024 | hlist_for_each_entry_rcu(gate, known_gates, list) { |
1047 | if (gate->mpath->sdata == sdata) { | 1025 | mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst); |
1048 | mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst); | 1026 | mesh_path_tx_pending(gate->mpath); |
1049 | mesh_path_tx_pending(gate->mpath); | 1027 | } |
1050 | } | ||
1051 | 1028 | ||
1052 | return (from_mpath == mpath) ? -EHOSTUNREACH : 0; | 1029 | return (from_mpath == mpath) ? -EHOSTUNREACH : 0; |
1053 | } | 1030 | } |
@@ -1136,7 +1113,7 @@ static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl) | |||
1136 | return 0; | 1113 | return 0; |
1137 | } | 1114 | } |
1138 | 1115 | ||
1139 | int mesh_pathtbl_init(void) | 1116 | int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata) |
1140 | { | 1117 | { |
1141 | struct mesh_table *tbl_path, *tbl_mpp; | 1118 | struct mesh_table *tbl_path, *tbl_mpp; |
1142 | int ret; | 1119 | int ret; |
@@ -1168,9 +1145,11 @@ int mesh_pathtbl_init(void) | |||
1168 | } | 1145 | } |
1169 | INIT_HLIST_HEAD(tbl_mpp->known_gates); | 1146 | INIT_HLIST_HEAD(tbl_mpp->known_gates); |
1170 | 1147 | ||
1148 | rwlock_init(&sdata->u.mesh.pathtbl_resize_lock); | ||
1149 | |||
1171 | /* Need no locking since this is during init */ | 1150 | /* Need no locking since this is during init */ |
1172 | RCU_INIT_POINTER(mesh_paths, tbl_path); | 1151 | RCU_INIT_POINTER(sdata->u.mesh.mesh_paths, tbl_path); |
1173 | RCU_INIT_POINTER(mpp_paths, tbl_mpp); | 1152 | RCU_INIT_POINTER(sdata->u.mesh.mpp_paths, tbl_mpp); |
1174 | 1153 | ||
1175 | return 0; | 1154 | return 0; |
1176 | 1155 | ||
@@ -1189,33 +1168,31 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata) | |||
1189 | int i; | 1168 | int i; |
1190 | 1169 | ||
1191 | rcu_read_lock(); | 1170 | rcu_read_lock(); |
1192 | tbl = rcu_dereference(mesh_paths); | 1171 | tbl = rcu_dereference(sdata->u.mesh.mesh_paths); |
1193 | for_each_mesh_entry(tbl, node, i) { | 1172 | for_each_mesh_entry(tbl, node, i) { |
1194 | if (node->mpath->sdata != sdata) | ||
1195 | continue; | ||
1196 | mpath = node->mpath; | 1173 | mpath = node->mpath; |
1197 | if ((!(mpath->flags & MESH_PATH_RESOLVING)) && | 1174 | if ((!(mpath->flags & MESH_PATH_RESOLVING)) && |
1198 | (!(mpath->flags & MESH_PATH_FIXED)) && | 1175 | (!(mpath->flags & MESH_PATH_FIXED)) && |
1199 | time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) | 1176 | time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) |
1200 | mesh_path_del(mpath->sdata, mpath->dst); | 1177 | mesh_path_del(sdata, mpath->dst); |
1201 | } | 1178 | } |
1202 | 1179 | ||
1203 | tbl = rcu_dereference(mpp_paths); | 1180 | tbl = rcu_dereference(sdata->u.mesh.mpp_paths); |
1204 | for_each_mesh_entry(tbl, node, i) { | 1181 | for_each_mesh_entry(tbl, node, i) { |
1205 | if (node->mpath->sdata != sdata) | ||
1206 | continue; | ||
1207 | mpath = node->mpath; | 1182 | mpath = node->mpath; |
1208 | if ((!(mpath->flags & MESH_PATH_FIXED)) && | 1183 | if ((!(mpath->flags & MESH_PATH_FIXED)) && |
1209 | time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) | 1184 | time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) |
1210 | mpp_path_del(mpath->sdata, mpath->dst); | 1185 | mpp_path_del(sdata, mpath->dst); |
1211 | } | 1186 | } |
1212 | 1187 | ||
1213 | rcu_read_unlock(); | 1188 | rcu_read_unlock(); |
1214 | } | 1189 | } |
1215 | 1190 | ||
1216 | void mesh_pathtbl_unregister(void) | 1191 | void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata) |
1217 | { | 1192 | { |
1218 | /* no need for locking during exit path */ | 1193 | /* no need for locking during exit path */ |
1219 | mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true); | 1194 | mesh_table_free(rcu_dereference_protected(sdata->u.mesh.mesh_paths, 1), |
1220 | mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true); | 1195 | true); |
1196 | mesh_table_free(rcu_dereference_protected(sdata->u.mesh.mpp_paths, 1), | ||
1197 | true); | ||
1221 | } | 1198 | } |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index c485fc26fa0c..b3196b1e15c2 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -2212,7 +2212,7 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata, | |||
2212 | } | 2212 | } |
2213 | 2213 | ||
2214 | if (mppath && mpath) | 2214 | if (mppath && mpath) |
2215 | mesh_path_del(mpath->sdata, mpath->dst); | 2215 | mesh_path_del(sdata, mpath->dst); |
2216 | } | 2216 | } |
2217 | 2217 | ||
2218 | /* | 2218 | /* |