author	Bob Copeland <me@bobcopeland.com>	2016-03-02 10:09:20 -0500
committer	Johannes Berg <johannes.berg@intel.com>	2016-04-05 04:56:33 -0400
commit	60854fd94573f0d3b80b55b40cf0140a0430f3ab (patch)
tree	f39d8d873824d2aa149b0c88a2310eec26d198d3
parent	8f6fd83c6c5ec66a4a70c728535ddcdfef4f3697 (diff)
mac80211: mesh: convert path table to rhashtable
In the time since the mesh path table was implemented as an RCU-traversable,
dynamically growing hash table, a generic RCU hashtable implementation was
added to the kernel.

Switch the mesh path table over to rhashtable to remove some code and also
gain some features like automatic shrinking.

Cc: Thomas Graf <tgraf@suug.ch>
Cc: netdev@vger.kernel.org
Signed-off-by: Bob Copeland <me@bobcopeland.com>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
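[For readers coming to this patch cold: the conversion relies on rhashtable's
fixed-size-key API. The following is an illustrative sketch only; the names
example_entry, example_params and example_insert_lookup are invented for this
note and appear nowhere in the patch. It shows how a table keyed on a 6-byte
address, like the mesh path table below, is declared and used:

	#include <linux/rhashtable.h>
	#include <linux/etherdevice.h>

	struct example_entry {
		u8 dst[ETH_ALEN];		/* lookup key */
		struct rhash_head rhash;	/* hash chain linkage */
	};

	static const struct rhashtable_params example_params = {
		.key_len = ETH_ALEN,
		.key_offset = offsetof(struct example_entry, dst),
		.head_offset = offsetof(struct example_entry, rhash),
		.automatic_shrinking = true,	/* table shrinks as entries go */
	};

	/* assumes rhashtable_init(ht, &example_params) already ran once */
	static int example_insert_lookup(struct rhashtable *ht,
					 struct example_entry *e,
					 const u8 *addr)
	{
		struct example_entry *found;
		int ret;

		/* insert; returns -EEXIST if the key is already present */
		ret = rhashtable_lookup_insert_fast(ht, &e->rhash,
						    example_params);
		if (ret)
			return ret;

		/* lockless lookup under RCU */
		rcu_read_lock();
		found = rhashtable_lookup_fast(ht, addr, example_params);
		rcu_read_unlock();

		return found ? 0 : -ENOENT;
	}

The table resizes, and with automatic_shrinking also shrinks, on its own;
that is what lets the patch delete the hand-rolled grow machinery.]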
-rw-r--r--	net/mac80211/ieee80211_i.h	11
-rw-r--r--	net/mac80211/mesh.c	6
-rw-r--r--	net/mac80211/mesh.h	31
-rw-r--r--	net/mac80211/mesh_pathtbl.c	786
4 files changed, 259 insertions, 575 deletions
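[The other recurring pattern in this patch is the rhashtable walker, which
replaces the old per-bucket iteration everywhere the table is scanned (plink
handling, flushing, expiry). An illustrative sketch only, reusing the invented
example_entry type from the note above:

	#include <linux/err.h>
	#include <linux/rhashtable.h>

	static void example_walk(struct rhashtable *ht)
	{
		struct rhashtable_iter iter;
		struct example_entry *e;
		int ret;

		ret = rhashtable_walk_init(ht, &iter, GFP_ATOMIC);
		if (ret)
			return;

		/* -EAGAIN here only signals a resize in progress */
		ret = rhashtable_walk_start(&iter);
		if (ret && ret != -EAGAIN)
			goto out;

		while ((e = rhashtable_walk_next(&iter))) {
			if (IS_ERR(e) && PTR_ERR(e) == -EAGAIN)
				continue;	/* table resized under us */
			if (IS_ERR(e))
				break;
			/* ... act on e ... */
		}
	out:
		rhashtable_walk_stop(&iter);
		rhashtable_walk_exit(&iter);
	}

This is the loop shape used by mesh_plink_broken(),
mesh_path_flush_by_nexthop() and mesh_path_tbl_expire() below.]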
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index db7f0dbebc4b..c8945e2d8a86 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -697,17 +697,10 @@ struct ieee80211_if_mesh {
 	/* offset from skb->data while building IE */
 	int meshconf_offset;
 
-	struct mesh_table __rcu *mesh_paths;
-	struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */
+	struct mesh_table *mesh_paths;
+	struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */
 	int mesh_paths_generation;
 	int mpp_paths_generation;
-
-	/* Protects assignment of the mesh_paths/mpp_paths table
-	 * pointer for resize against reading it for add/delete
-	 * of individual paths. Pure readers (lookups) just use
-	 * RCU.
-	 */
-	rwlock_t pathtbl_resize_lock;
 };
 
 #ifdef CONFIG_MAC80211_MESH
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index c92af2a7714d..a216c439b6f2 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -1347,12 +1347,6 @@ void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata)
 			  ifmsh->last_preq + msecs_to_jiffies(ifmsh->mshcfg.dot11MeshHWMPpreqMinInterval)))
 		mesh_path_start_discovery(sdata);
 
-	if (test_and_clear_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags))
-		mesh_mpath_table_grow(sdata);
-
-	if (test_and_clear_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags))
-		mesh_mpp_table_grow(sdata);
-
 	if (test_and_clear_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags))
 		ieee80211_mesh_housekeeping(sdata);
 
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index f3cc3917e048..cc6854db156e 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -51,10 +51,6 @@ enum mesh_path_flags {
  *
  *
  * @MESH_WORK_HOUSEKEEPING: run the periodic mesh housekeeping tasks
- * @MESH_WORK_GROW_MPATH_TABLE: the mesh path table is full and needs
- *	to grow.
- * @MESH_WORK_GROW_MPP_TABLE: the mesh portals table is full and needs to
- *	grow
  * @MESH_WORK_ROOT: the mesh root station needs to send a frame
  * @MESH_WORK_DRIFT_ADJUST: time to compensate for clock drift relative to other
  *	mesh nodes
@@ -62,8 +58,6 @@ enum mesh_path_flags {
  */
 enum mesh_deferred_task_flags {
 	MESH_WORK_HOUSEKEEPING,
-	MESH_WORK_GROW_MPATH_TABLE,
-	MESH_WORK_GROW_MPP_TABLE,
 	MESH_WORK_ROOT,
 	MESH_WORK_DRIFT_ADJUST,
 	MESH_WORK_MBSS_CHANGED,
@@ -105,6 +99,7 @@ enum mesh_deferred_task_flags {
 struct mesh_path {
 	u8 dst[ETH_ALEN];
 	u8 mpp[ETH_ALEN];	/* used for MPP or MAP */
+	struct rhash_head rhash;
 	struct hlist_node gate_list;
 	struct ieee80211_sub_if_data *sdata;
 	struct sta_info __rcu *next_hop;
@@ -129,34 +124,17 @@ struct mesh_path {
 /**
  * struct mesh_table
  *
- * @hash_buckets: array of hash buckets of the table
- * @hashwlock: array of locks to protect write operations, one per bucket
- * @hash_mask: 2^size_order - 1, used to compute hash idx
- * @hash_rnd: random value used for hash computations
  * @entries: number of entries in the table
- * @free_node: function to free nodes of the table
- * @copy_node: function to copy nodes of the table
- * @size_order: determines size of the table, there will be 2^size_order hash
- *	buckets
  * @known_gates: list of known mesh gates and their mpaths by the station. The
  *	gate's mpath may or may not be resolved and active.
- *
- * rcu_head: RCU head to free the table
+ * @rhash: the rhashtable containing struct mesh_paths, keyed by dest addr
  */
 struct mesh_table {
-	/* Number of buckets will be 2^N */
-	struct hlist_head *hash_buckets;
-	spinlock_t *hashwlock;		/* One per bucket, for add/del */
-	unsigned int hash_mask;		/* (2^size_order) - 1 */
-	__u32 hash_rnd;			/* Used for hash generation */
 	atomic_t entries;		/* Up to MAX_MESH_NEIGHBOURS */
-	void (*free_node) (struct hlist_node *p, bool free_leafs);
-	int (*copy_node) (struct hlist_node *p, struct mesh_table *newtbl);
-	int size_order;
 	struct hlist_head *known_gates;
 	spinlock_t gates_lock;
 
-	struct rcu_head rcu_head;
+	struct rhashtable rhead;
 };
 
 /* Recent multicast cache */
@@ -300,9 +278,6 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata,
 void mesh_sta_cleanup(struct sta_info *sta);
 
 /* Private interfaces */
-/* Mesh tables */
-void mesh_mpath_table_grow(struct ieee80211_sub_if_data *sdata);
-void mesh_mpp_table_grow(struct ieee80211_sub_if_data *sdata);
 /* Mesh paths */
 int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
 		       u8 ttl, const u8 *target, u32 target_sn,
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index e4daf4b94eaf..7455397f8c3b 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -18,11 +18,20 @@
 #include "ieee80211_i.h"
 #include "mesh.h"
 
-/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
-#define INIT_PATHS_SIZE_ORDER	2
+static u32 mesh_table_hash(const void *addr, u32 len, u32 seed)
+{
+	/* Use last four bytes of hw addr as hash index */
+	return jhash_1word(*(u32 *)(addr+2), seed);
+}
 
-/* Keep the mean chain length below this constant */
-#define MEAN_CHAIN_LEN		2
+static const struct rhashtable_params mesh_rht_params = {
+	.nelem_hint = 2,
+	.automatic_shrinking = true,
+	.key_len = ETH_ALEN,
+	.key_offset = offsetof(struct mesh_path, dst),
+	.head_offset = offsetof(struct mesh_path, rhash),
+	.hashfn = mesh_table_hash,
+};
 
 static inline bool mpath_expired(struct mesh_path *mpath)
 {
@@ -31,157 +40,47 @@ static inline bool mpath_expired(struct mesh_path *mpath)
 		!(mpath->flags & MESH_PATH_FIXED);
 }
 
-struct mpath_node {
-	struct hlist_node list;
-	struct rcu_head rcu;
-	/* This indirection allows two different tables to point to the same
-	 * mesh_path structure, useful when resizing
-	 */
-	struct mesh_path *mpath;
-};
-
-static inline struct mesh_table *resize_dereference_paths(
-	struct ieee80211_sub_if_data *sdata,
-	struct mesh_table __rcu *table)
+static void mesh_path_reclaim(struct rcu_head *rp)
 {
-	return rcu_dereference_protected(table,
-		lockdep_is_held(&sdata->u.mesh.pathtbl_resize_lock));
-}
+	struct mesh_path *mpath = container_of(rp, struct mesh_path, rcu);
 
-static inline struct mesh_table *resize_dereference_mesh_paths(
-	struct ieee80211_sub_if_data *sdata)
-{
-	return resize_dereference_paths(sdata, sdata->u.mesh.mesh_paths);
+	del_timer_sync(&mpath->timer);
+	kfree(mpath);
 }
 
-static inline struct mesh_table *resize_dereference_mpp_paths(
-	struct ieee80211_sub_if_data *sdata)
+static void mesh_path_rht_free(void *ptr, void *unused_arg)
 {
-	return resize_dereference_paths(sdata, sdata->u.mesh.mpp_paths);
+	struct mesh_path *mpath = ptr;
+	call_rcu(&mpath->rcu, mesh_path_reclaim);
 }
 
-/*
- * CAREFUL -- "tbl" must not be an expression,
- * in particular not an rcu_dereference(), since
- * it's used twice. So it is illegal to do
- * for_each_mesh_entry(rcu_dereference(...), ...)
- */
-#define for_each_mesh_entry(tbl, node, i) \
-	for (i = 0; i <= tbl->hash_mask; i++) \
-		hlist_for_each_entry_rcu(node, &tbl->hash_buckets[i], list)
-
-
-static struct mesh_table *mesh_table_alloc(int size_order)
+static struct mesh_table *mesh_table_alloc(void)
 {
-	int i;
 	struct mesh_table *newtbl;
 
 	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
 	if (!newtbl)
 		return NULL;
 
-	newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
-			(1 << size_order), GFP_ATOMIC);
-
-	if (!newtbl->hash_buckets) {
+	newtbl->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
+	if (!newtbl->known_gates) {
 		kfree(newtbl);
 		return NULL;
 	}
-
-	newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
-			(1 << size_order), GFP_ATOMIC);
-	if (!newtbl->hashwlock) {
-		kfree(newtbl->hash_buckets);
-		kfree(newtbl);
-		return NULL;
-	}
-
-	newtbl->size_order = size_order;
-	newtbl->hash_mask = (1 << size_order) - 1;
+	INIT_HLIST_HEAD(newtbl->known_gates);
 	atomic_set(&newtbl->entries, 0);
-	get_random_bytes(&newtbl->hash_rnd,
-			sizeof(newtbl->hash_rnd));
-	for (i = 0; i <= newtbl->hash_mask; i++)
-		spin_lock_init(&newtbl->hashwlock[i]);
 	spin_lock_init(&newtbl->gates_lock);
 
 	return newtbl;
 }
 
-static void __mesh_table_free(struct mesh_table *tbl)
+static void mesh_table_free(struct mesh_table *tbl)
 {
-	kfree(tbl->hash_buckets);
-	kfree(tbl->hashwlock);
+	rhashtable_free_and_destroy(&tbl->rhead,
+				    mesh_path_rht_free, NULL);
 	kfree(tbl);
 }
 
-static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
-{
-	struct hlist_head *mesh_hash;
-	struct hlist_node *p, *q;
-	struct mesh_path *gate;
-	int i;
-
-	mesh_hash = tbl->hash_buckets;
-	if (free_leafs) {
-		spin_lock_bh(&tbl->gates_lock);
-		hlist_for_each_entry_safe(gate, q,
-					  tbl->known_gates, gate_list)
-			hlist_del(&gate->gate_list);
-		kfree(tbl->known_gates);
-		spin_unlock_bh(&tbl->gates_lock);
-	}
-	for (i = 0; i <= tbl->hash_mask; i++) {
-		spin_lock_bh(&tbl->hashwlock[i]);
-		hlist_for_each_safe(p, q, &mesh_hash[i]) {
-			tbl->free_node(p, free_leafs);
-			atomic_dec(&tbl->entries);
-		}
-		spin_unlock_bh(&tbl->hashwlock[i]);
-	}
-
-	__mesh_table_free(tbl);
-}
-
-static int mesh_table_grow(struct mesh_table *oldtbl,
-			   struct mesh_table *newtbl)
-{
-	struct hlist_head *oldhash;
-	struct hlist_node *p, *q;
-	int i;
-
-	if (atomic_read(&oldtbl->entries)
-			< MEAN_CHAIN_LEN * (oldtbl->hash_mask + 1))
-		return -EAGAIN;
-
-	newtbl->free_node = oldtbl->free_node;
-	newtbl->copy_node = oldtbl->copy_node;
-	newtbl->known_gates = oldtbl->known_gates;
-	atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));
-
-	oldhash = oldtbl->hash_buckets;
-	for (i = 0; i <= oldtbl->hash_mask; i++)
-		hlist_for_each(p, &oldhash[i])
-			if (oldtbl->copy_node(p, newtbl) < 0)
-				goto errcopy;
-
-	return 0;
-
-errcopy:
-	for (i = 0; i <= newtbl->hash_mask; i++) {
-		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
-			oldtbl->free_node(p, 0);
-	}
-	return -ENOMEM;
-}
-
-static u32 mesh_table_hash(const u8 *addr, struct mesh_table *tbl)
-{
-	/* Use last four bytes of hw addr as hash index */
-	return jhash_1word(*(u32 *)(addr+2), tbl->hash_rnd) & tbl->hash_mask;
-}
-
-
 /**
  *
  * mesh_path_assign_nexthop - update mesh path next hop
@@ -324,22 +223,15 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
 			      struct ieee80211_sub_if_data *sdata)
 {
 	struct mesh_path *mpath;
-	struct hlist_head *bucket;
-	struct mpath_node *node;
-
-	bucket = &tbl->hash_buckets[mesh_table_hash(dst, tbl)];
-	hlist_for_each_entry_rcu(node, bucket, list) {
-		mpath = node->mpath;
-		if (ether_addr_equal(dst, mpath->dst)) {
-			if (mpath_expired(mpath)) {
-				spin_lock_bh(&mpath->state_lock);
-				mpath->flags &= ~MESH_PATH_ACTIVE;
-				spin_unlock_bh(&mpath->state_lock);
-			}
-			return mpath;
-		}
+
+	mpath = rhashtable_lookup_fast(&tbl->rhead, dst, mesh_rht_params);
+
+	if (mpath && mpath_expired(mpath)) {
+		spin_lock_bh(&mpath->state_lock);
+		mpath->flags &= ~MESH_PATH_ACTIVE;
+		spin_unlock_bh(&mpath->state_lock);
 	}
-	return NULL;
+	return mpath;
 }
 
 /**
@@ -354,17 +246,52 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
 struct mesh_path *
 mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
 {
-	return mpath_lookup(rcu_dereference(sdata->u.mesh.mesh_paths), dst,
-			    sdata);
+	return mpath_lookup(sdata->u.mesh.mesh_paths, dst, sdata);
 }
 
 struct mesh_path *
 mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
 {
-	return mpath_lookup(rcu_dereference(sdata->u.mesh.mpp_paths), dst,
-			    sdata);
+	return mpath_lookup(sdata->u.mesh.mpp_paths, dst, sdata);
 }
 
+static struct mesh_path *
+__mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
+{
+	int i = 0, ret;
+	struct mesh_path *mpath = NULL;
+	struct rhashtable_iter iter;
+
+	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
+	if (ret)
+		return NULL;
+
+	ret = rhashtable_walk_start(&iter);
+	if (ret && ret != -EAGAIN)
+		goto err;
+
+	while ((mpath = rhashtable_walk_next(&iter))) {
+		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
+			continue;
+		if (IS_ERR(mpath))
+			break;
+		if (i++ == idx)
+			break;
+	}
+err:
+	rhashtable_walk_stop(&iter);
+	rhashtable_walk_exit(&iter);
+
+	if (IS_ERR(mpath) || !mpath)
+		return NULL;
+
+	if (mpath_expired(mpath)) {
+		spin_lock_bh(&mpath->state_lock);
+		mpath->flags &= ~MESH_PATH_ACTIVE;
+		spin_unlock_bh(&mpath->state_lock);
+	}
+	return mpath;
+}
 
 /**
  * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
@@ -378,23 +305,7 @@ mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
 struct mesh_path *
 mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
 {
-	struct mesh_table *tbl = rcu_dereference(sdata->u.mesh.mesh_paths);
-	struct mpath_node *node;
-	int i;
-	int j = 0;
-
-	for_each_mesh_entry(tbl, node, i) {
-		if (j++ == idx) {
-			if (mpath_expired(node->mpath)) {
-				spin_lock_bh(&node->mpath->state_lock);
-				node->mpath->flags &= ~MESH_PATH_ACTIVE;
-				spin_unlock_bh(&node->mpath->state_lock);
-			}
-			return node->mpath;
-		}
-	}
-
-	return NULL;
+	return __mesh_path_lookup_by_idx(sdata->u.mesh.mesh_paths, idx);
 }
 
 /**
@@ -409,17 +320,7 @@ mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
 struct mesh_path *
 mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
 {
-	struct mesh_table *tbl = rcu_dereference(sdata->u.mesh.mpp_paths);
-	struct mpath_node *node;
-	int i;
-	int j = 0;
-
-	for_each_mesh_entry(tbl, node, i) {
-		if (j++ == idx)
-			return node->mpath;
-	}
-
-	return NULL;
+	return __mesh_path_lookup_by_idx(sdata->u.mesh.mpp_paths, idx);
 }
 
 /**
@@ -432,7 +333,7 @@ int mesh_path_add_gate(struct mesh_path *mpath)
 	int err;
 
 	rcu_read_lock();
-	tbl = rcu_dereference(mpath->sdata->u.mesh.mesh_paths);
+	tbl = mpath->sdata->u.mesh.mesh_paths;
 
 	spin_lock_bh(&mpath->state_lock);
 	if (mpath->is_gate) {
@@ -526,15 +427,9 @@ struct mesh_path *mesh_path_new(struct ieee80211_sub_if_data *sdata,
 struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
 				const u8 *dst)
 {
-	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
-	struct ieee80211_local *local = sdata->local;
 	struct mesh_table *tbl;
 	struct mesh_path *mpath, *new_mpath;
-	struct mpath_node *node, *new_node;
-	struct hlist_head *bucket;
-	int grow = 0;
-	int err;
-	u32 hash_idx;
+	int ret;
 
 	if (ether_addr_equal(dst, sdata->vif.addr))
 		/* never add ourselves as neighbours */
@@ -546,116 +441,44 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
 	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
 		return ERR_PTR(-ENOSPC);
 
-	read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock);
-	tbl = resize_dereference_mesh_paths(sdata);
-
-	hash_idx = mesh_table_hash(dst, tbl);
-	bucket = &tbl->hash_buckets[hash_idx];
-
-	spin_lock(&tbl->hashwlock[hash_idx]);
-
-	hlist_for_each_entry(node, bucket, list) {
-		mpath = node->mpath;
-		if (ether_addr_equal(dst, mpath->dst))
-			goto found;
-	}
-
-	err = -ENOMEM;
 	new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);
 	if (!new_mpath)
-		goto err_path_alloc;
-
-	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
-	if (!new_node)
-		goto err_node_alloc;
-
-	new_node->mpath = new_mpath;
-	hlist_add_head_rcu(&new_node->list, bucket);
-	if (atomic_inc_return(&tbl->entries) >=
-	    MEAN_CHAIN_LEN * (tbl->hash_mask + 1))
-		grow = 1;
-
-	sdata->u.mesh.mesh_paths_generation++;
-
-	if (grow) {
-		set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
-		ieee80211_queue_work(&local->hw, &sdata->work);
-	}
-	mpath = new_mpath;
-found:
-	spin_unlock(&tbl->hashwlock[hash_idx]);
-	read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock);
-	return mpath;
-
-err_node_alloc:
-	kfree(new_mpath);
-err_path_alloc:
-	atomic_dec(&sdata->u.mesh.mpaths);
-	spin_unlock(&tbl->hashwlock[hash_idx]);
-	read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock);
-	return ERR_PTR(err);
-}
-
-static void mesh_table_free_rcu(struct rcu_head *rcu)
-{
-	struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);
-
-	mesh_table_free(tbl, false);
-}
-
-void mesh_mpath_table_grow(struct ieee80211_sub_if_data *sdata)
-{
-	struct mesh_table *oldtbl, *newtbl;
+		return ERR_PTR(-ENOMEM);
 
-	write_lock_bh(&sdata->u.mesh.pathtbl_resize_lock);
-	oldtbl = resize_dereference_mesh_paths(sdata);
-	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
-	if (!newtbl)
-		goto out;
-	if (mesh_table_grow(oldtbl, newtbl) < 0) {
-		__mesh_table_free(newtbl);
-		goto out;
-	}
-	rcu_assign_pointer(sdata->u.mesh.mesh_paths, newtbl);
+	tbl = sdata->u.mesh.mesh_paths;
+	do {
+		ret = rhashtable_lookup_insert_fast(&tbl->rhead,
+						    &new_mpath->rhash,
+						    mesh_rht_params);
 
-	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);
+		if (ret == -EEXIST)
+			mpath = rhashtable_lookup_fast(&tbl->rhead,
+						       dst,
+						       mesh_rht_params);
 
- out:
-	write_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock);
-}
+	} while (unlikely(ret == -EEXIST && !mpath));
 
-void mesh_mpp_table_grow(struct ieee80211_sub_if_data *sdata)
-{
-	struct mesh_table *oldtbl, *newtbl;
+	if (ret && ret != -EEXIST)
+		return ERR_PTR(ret);
 
-	write_lock_bh(&sdata->u.mesh.pathtbl_resize_lock);
-	oldtbl = resize_dereference_mpp_paths(sdata);
-	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
-	if (!newtbl)
-		goto out;
-	if (mesh_table_grow(oldtbl, newtbl) < 0) {
-		__mesh_table_free(newtbl);
-		goto out;
+	/* At this point either new_mpath was added, or we found a
+	 * matching entry already in the table; in the latter case
+	 * free the unnecessary new entry.
+	 */
+	if (ret == -EEXIST) {
+		kfree(new_mpath);
+		new_mpath = mpath;
 	}
-	rcu_assign_pointer(sdata->u.mesh.mpp_paths, newtbl);
-	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);
-
- out:
-	write_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock);
+	sdata->u.mesh.mesh_paths_generation++;
+	return new_mpath;
 }
 
 int mpp_path_add(struct ieee80211_sub_if_data *sdata,
 		 const u8 *dst, const u8 *mpp)
 {
-	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
-	struct ieee80211_local *local = sdata->local;
 	struct mesh_table *tbl;
-	struct mesh_path *mpath, *new_mpath;
-	struct mpath_node *node, *new_node;
-	struct hlist_head *bucket;
-	int grow = 0;
-	int err = 0;
-	u32 hash_idx;
+	struct mesh_path *new_mpath;
+	int ret;
 
 	if (ether_addr_equal(dst, sdata->vif.addr))
 		/* never add ourselves as neighbours */
@@ -664,56 +487,19 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
 	if (is_multicast_ether_addr(dst))
 		return -ENOTSUPP;
 
-	err = -ENOMEM;
 	new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);
-	if (!new_mpath)
-		goto err_path_alloc;
 
-	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
-	if (!new_node)
-		goto err_node_alloc;
+	if (!new_mpath)
+		return -ENOMEM;
 
 	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
-	new_node->mpath = new_mpath;
-	read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock);
-	tbl = resize_dereference_mpp_paths(sdata);
-
-	hash_idx = mesh_table_hash(dst, tbl);
-	bucket = &tbl->hash_buckets[hash_idx];
-
-	spin_lock(&tbl->hashwlock[hash_idx]);
-
-	err = -EEXIST;
-	hlist_for_each_entry(node, bucket, list) {
-		mpath = node->mpath;
-		if (ether_addr_equal(dst, mpath->dst))
-			goto err_exists;
-	}
-
-	hlist_add_head_rcu(&new_node->list, bucket);
-	if (atomic_inc_return(&tbl->entries) >=
-	    MEAN_CHAIN_LEN * (tbl->hash_mask + 1))
-		grow = 1;
-
-	spin_unlock(&tbl->hashwlock[hash_idx]);
-	read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock);
+	tbl = sdata->u.mesh.mpp_paths;
+	ret = rhashtable_lookup_insert_fast(&tbl->rhead,
+					    &new_mpath->rhash,
+					    mesh_rht_params);
 
 	sdata->u.mesh.mpp_paths_generation++;
-
-	if (grow) {
-		set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
-		ieee80211_queue_work(&local->hw, &sdata->work);
-	}
-	return 0;
-
-err_exists:
-	spin_unlock(&tbl->hashwlock[hash_idx]);
-	read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock);
-	kfree(new_node);
-err_node_alloc:
-	kfree(new_mpath);
-err_path_alloc:
-	return err;
+	return ret;
 }
 
 
@@ -727,17 +513,26 @@ err_path_alloc:
  */
 void mesh_plink_broken(struct sta_info *sta)
 {
-	struct mesh_table *tbl;
+	struct ieee80211_sub_if_data *sdata = sta->sdata;
+	struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
 	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 	struct mesh_path *mpath;
-	struct mpath_node *node;
-	struct ieee80211_sub_if_data *sdata = sta->sdata;
-	int i;
+	struct rhashtable_iter iter;
+	int ret;
 
-	rcu_read_lock();
-	tbl = rcu_dereference(sdata->u.mesh.mesh_paths);
-	for_each_mesh_entry(tbl, node, i) {
-		mpath = node->mpath;
+	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
+	if (ret)
+		return;
+
+	ret = rhashtable_walk_start(&iter);
+	if (ret && ret != -EAGAIN)
+		goto out;
+
+	while ((mpath = rhashtable_walk_next(&iter))) {
+		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
+			continue;
+		if (IS_ERR(mpath))
+			break;
 		if (rcu_access_pointer(mpath->next_hop) == sta &&
 		    mpath->flags & MESH_PATH_ACTIVE &&
 		    !(mpath->flags & MESH_PATH_FIXED)) {
@@ -751,30 +546,20 @@ void mesh_plink_broken(struct sta_info *sta)
 			    WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
 		}
 	}
-	rcu_read_unlock();
-}
-
-static void mesh_path_node_reclaim(struct rcu_head *rp)
-{
-	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
-
-	del_timer_sync(&node->mpath->timer);
-	kfree(node->mpath);
-	kfree(node);
+out:
+	rhashtable_walk_stop(&iter);
+	rhashtable_walk_exit(&iter);
 }
 
-/* needs to be called with the corresponding hashwlock taken */
-static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
+static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
 {
-	struct mesh_path *mpath = node->mpath;
-	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
+	struct ieee80211_sub_if_data *sdata = mpath->sdata;
 
+	rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params);
 	spin_lock_bh(&mpath->state_lock);
 	mpath->flags |= MESH_PATH_RESOLVING;
-	if (mpath->is_gate)
-		mesh_gate_del(tbl, mpath);
-	hlist_del_rcu(&node->list);
-	call_rcu(&node->rcu, mesh_path_node_reclaim);
+	mesh_gate_del(tbl, mpath);
+	call_rcu(&mpath->rcu, mesh_path_reclaim);
 	spin_unlock_bh(&mpath->state_lock);
 	atomic_dec(&sdata->u.mesh.mpaths);
 	atomic_dec(&tbl->entries);
@@ -794,63 +579,87 @@ static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
 void mesh_path_flush_by_nexthop(struct sta_info *sta)
 {
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
-	struct mesh_table *tbl;
+	struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
 	struct mesh_path *mpath;
-	struct mpath_node *node;
-	int i;
+	struct rhashtable_iter iter;
+	int ret;
 
-	rcu_read_lock();
-	read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock);
-	tbl = resize_dereference_mesh_paths(sdata);
-	for_each_mesh_entry(tbl, node, i) {
-		mpath = node->mpath;
-		if (rcu_access_pointer(mpath->next_hop) == sta) {
-			spin_lock(&tbl->hashwlock[i]);
-			__mesh_path_del(tbl, node);
-			spin_unlock(&tbl->hashwlock[i]);
-		}
+	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
+	if (ret)
+		return;
+
+	ret = rhashtable_walk_start(&iter);
+	if (ret && ret != -EAGAIN)
+		goto out;
+
+	while ((mpath = rhashtable_walk_next(&iter))) {
+		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
+			continue;
+		if (IS_ERR(mpath))
+			break;
+
+		if (rcu_access_pointer(mpath->next_hop) == sta)
+			__mesh_path_del(tbl, mpath);
 	}
-	read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock);
-	rcu_read_unlock();
+out:
+	rhashtable_walk_stop(&iter);
+	rhashtable_walk_exit(&iter);
 }
 
 static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
 			       const u8 *proxy)
 {
-	struct mesh_table *tbl;
-	struct mesh_path *mpp;
-	struct mpath_node *node;
-	int i;
+	struct mesh_table *tbl = sdata->u.mesh.mpp_paths;
+	struct mesh_path *mpath;
+	struct rhashtable_iter iter;
+	int ret;
 
-	rcu_read_lock();
-	read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock);
-	tbl = resize_dereference_mpp_paths(sdata);
-	for_each_mesh_entry(tbl, node, i) {
-		mpp = node->mpath;
-		if (ether_addr_equal(mpp->mpp, proxy)) {
-			spin_lock(&tbl->hashwlock[i]);
-			__mesh_path_del(tbl, node);
-			spin_unlock(&tbl->hashwlock[i]);
-		}
+	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
+	if (ret)
+		return;
+
+	ret = rhashtable_walk_start(&iter);
+	if (ret && ret != -EAGAIN)
+		goto out;
+
+	while ((mpath = rhashtable_walk_next(&iter))) {
+		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
+			continue;
+		if (IS_ERR(mpath))
+			break;
+
+		if (ether_addr_equal(mpath->mpp, proxy))
+			__mesh_path_del(tbl, mpath);
 	}
-	read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock);
-	rcu_read_unlock();
+out:
+	rhashtable_walk_stop(&iter);
+	rhashtable_walk_exit(&iter);
 }
 
-static void table_flush_by_iface(struct mesh_table *tbl,
-				 struct ieee80211_sub_if_data *sdata)
+static void table_flush_by_iface(struct mesh_table *tbl)
 {
 	struct mesh_path *mpath;
-	struct mpath_node *node;
-	int i;
+	struct rhashtable_iter iter;
+	int ret;
 
-	WARN_ON(!rcu_read_lock_held());
-	for_each_mesh_entry(tbl, node, i) {
-		mpath = node->mpath;
-		spin_lock_bh(&tbl->hashwlock[i]);
-		__mesh_path_del(tbl, node);
-		spin_unlock_bh(&tbl->hashwlock[i]);
+	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
+	if (ret)
+		return;
+
+	ret = rhashtable_walk_start(&iter);
+	if (ret && ret != -EAGAIN)
+		goto out;
+
+	while ((mpath = rhashtable_walk_next(&iter))) {
+		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
+			continue;
+		if (IS_ERR(mpath))
+			break;
+		__mesh_path_del(tbl, mpath);
 	}
+out:
+	rhashtable_walk_stop(&iter);
+	rhashtable_walk_exit(&iter);
 }
 
 /**
@@ -863,16 +672,8 @@ static void table_flush_by_iface(struct mesh_table *tbl,
  */
 void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
 {
-	struct mesh_table *tbl;
-
-	rcu_read_lock();
-	read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock);
-	tbl = resize_dereference_mesh_paths(sdata);
-	table_flush_by_iface(tbl, sdata);
-	tbl = resize_dereference_mpp_paths(sdata);
-	table_flush_by_iface(tbl, sdata);
-	read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock);
-	rcu_read_unlock();
+	table_flush_by_iface(sdata->u.mesh.mesh_paths);
+	table_flush_by_iface(sdata->u.mesh.mpp_paths);
 }
 
 /**
@@ -884,36 +685,25 @@ void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
  *
  * Returns: 0 if successful
  */
-static int table_path_del(struct mesh_table __rcu *rcu_tbl,
+static int table_path_del(struct mesh_table *tbl,
 			  struct ieee80211_sub_if_data *sdata,
 			  const u8 *addr)
 {
-	struct mesh_table *tbl;
 	struct mesh_path *mpath;
-	struct mpath_node *node;
-	struct hlist_head *bucket;
-	int hash_idx;
-	int err = 0;
-
-	tbl = resize_dereference_paths(sdata, rcu_tbl);
-	hash_idx = mesh_table_hash(addr, tbl);
-	bucket = &tbl->hash_buckets[hash_idx];
-
-	spin_lock(&tbl->hashwlock[hash_idx]);
-	hlist_for_each_entry(node, bucket, list) {
-		mpath = node->mpath;
-		if (ether_addr_equal(addr, mpath->dst)) {
-			__mesh_path_del(tbl, node);
-			goto enddel;
-		}
+
+	rcu_read_lock();
+	mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params);
+	if (!mpath) {
+		rcu_read_unlock();
+		return -ENXIO;
 	}
 
-	err = -ENXIO;
-enddel:
-	spin_unlock(&tbl->hashwlock[hash_idx]);
-	return err;
+	__mesh_path_del(tbl, mpath);
+	rcu_read_unlock();
+	return 0;
 }
 
+
 /**
  * mesh_path_del - delete a mesh path from the table
  *
@@ -924,36 +714,13 @@ enddel:
  */
 int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
 {
-	int err = 0;
+	int err;
 
 	/* flush relevant mpp entries first */
 	mpp_flush_by_proxy(sdata, addr);
 
-	read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock);
 	err = table_path_del(sdata->u.mesh.mesh_paths, sdata, addr);
 	sdata->u.mesh.mesh_paths_generation++;
-	read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock);
-
-	return err;
-}
-
-/**
- * mpp_path_del - delete a mesh proxy path from the table
- *
- * @addr: addr address (ETH_ALEN length)
- * @sdata: local subif
- *
- * Returns: 0 if successful
- */
-static int mpp_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
-{
-	int err = 0;
-
-	read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock);
-	err = table_path_del(sdata->u.mesh.mpp_paths, sdata, addr);
-	sdata->u.mesh.mpp_paths_generation++;
-	read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock);
-
 	return err;
 }
 
@@ -987,18 +754,17 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
 	struct ieee80211_sub_if_data *sdata = mpath->sdata;
 	struct mesh_table *tbl;
 	struct mesh_path *from_mpath = mpath;
-	struct mesh_path *gate = NULL;
+	struct mesh_path *gate;
 	bool copy = false;
 	struct hlist_head *known_gates;
 
-	rcu_read_lock();
-	tbl = rcu_dereference(sdata->u.mesh.mesh_paths);
+	tbl = sdata->u.mesh.mesh_paths;
 	known_gates = tbl->known_gates;
-	rcu_read_unlock();
 
 	if (!known_gates)
 		return -EHOSTUNREACH;
 
+	rcu_read_lock();
 	hlist_for_each_entry_rcu(gate, known_gates, gate_list) {
 		if (gate->flags & MESH_PATH_ACTIVE) {
 			mpath_dbg(sdata, "Forwarding to %pM\n", gate->dst);
@@ -1016,6 +782,7 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
 		mpath_dbg(sdata, "Sending to %pM\n", gate->dst);
 		mesh_path_tx_pending(gate);
 	}
+	rcu_read_unlock();
 
 	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
 }
@@ -1072,118 +839,73 @@ void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
 	mesh_path_tx_pending(mpath);
 }
 
-static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
-{
-	struct mesh_path *mpath;
-	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);
-	mpath = node->mpath;
-	hlist_del_rcu(p);
-	if (free_leafs) {
-		del_timer_sync(&mpath->timer);
-		kfree(mpath);
-	}
-	kfree(node);
-}
-
-static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
-{
-	struct mesh_path *mpath;
-	struct mpath_node *node, *new_node;
-	u32 hash_idx;
-
-	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
-	if (new_node == NULL)
-		return -ENOMEM;
-
-	node = hlist_entry(p, struct mpath_node, list);
-	mpath = node->mpath;
-	new_node->mpath = mpath;
-	hash_idx = mesh_table_hash(mpath->dst, newtbl);
-	hlist_add_head(&new_node->list,
-		       &newtbl->hash_buckets[hash_idx]);
-	return 0;
-}
-
 int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
 {
 	struct mesh_table *tbl_path, *tbl_mpp;
 	int ret;
 
-	tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
+	tbl_path = mesh_table_alloc();
 	if (!tbl_path)
 		return -ENOMEM;
-	tbl_path->free_node = &mesh_path_node_free;
-	tbl_path->copy_node = &mesh_path_node_copy;
-	tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
-	if (!tbl_path->known_gates) {
-		ret = -ENOMEM;
-		goto free_path;
-	}
-	INIT_HLIST_HEAD(tbl_path->known_gates);
 
-
-	tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
+	tbl_mpp = mesh_table_alloc();
 	if (!tbl_mpp) {
 		ret = -ENOMEM;
 		goto free_path;
 	}
-	tbl_mpp->free_node = &mesh_path_node_free;
-	tbl_mpp->copy_node = &mesh_path_node_copy;
-	tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
-	if (!tbl_mpp->known_gates) {
-		ret = -ENOMEM;
-		goto free_mpp;
-	}
-	INIT_HLIST_HEAD(tbl_mpp->known_gates);
 
-	rwlock_init(&sdata->u.mesh.pathtbl_resize_lock);
+	rhashtable_init(&tbl_path->rhead, &mesh_rht_params);
+	rhashtable_init(&tbl_mpp->rhead, &mesh_rht_params);
 
-	/* Need no locking since this is during init */
-	RCU_INIT_POINTER(sdata->u.mesh.mesh_paths, tbl_path);
-	RCU_INIT_POINTER(sdata->u.mesh.mpp_paths, tbl_mpp);
+	sdata->u.mesh.mesh_paths = tbl_path;
+	sdata->u.mesh.mpp_paths = tbl_mpp;
 
 	return 0;
 
-free_mpp:
-	mesh_table_free(tbl_mpp, true);
 free_path:
-	mesh_table_free(tbl_path, true);
+	mesh_table_free(tbl_path);
 	return ret;
 }
 
-void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
+static
+void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
+			  struct mesh_table *tbl)
 {
-	struct mesh_table *tbl;
 	struct mesh_path *mpath;
-	struct mpath_node *node;
-	int i;
+	struct rhashtable_iter iter;
+	int ret;
 
-	rcu_read_lock();
-	tbl = rcu_dereference(sdata->u.mesh.mesh_paths);
-	for_each_mesh_entry(tbl, node, i) {
-		mpath = node->mpath;
+	ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_KERNEL);
+	if (ret)
+		return;
+
+	ret = rhashtable_walk_start(&iter);
+	if (ret && ret != -EAGAIN)
+		goto out;
+
+	while ((mpath = rhashtable_walk_next(&iter))) {
+		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
+			continue;
+		if (IS_ERR(mpath))
+			break;
 		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
 		    (!(mpath->flags & MESH_PATH_FIXED)) &&
 		    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
-			mesh_path_del(sdata, mpath->dst);
-	}
-
-	tbl = rcu_dereference(sdata->u.mesh.mpp_paths);
-	for_each_mesh_entry(tbl, node, i) {
-		mpath = node->mpath;
-		if ((!(mpath->flags & MESH_PATH_FIXED)) &&
-		    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
-			mpp_path_del(sdata, mpath->dst);
+			__mesh_path_del(tbl, mpath);
 	}
+out:
+	rhashtable_walk_stop(&iter);
+	rhashtable_walk_exit(&iter);
+}
 
-	rcu_read_unlock();
+void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
+{
+	mesh_path_tbl_expire(sdata, sdata->u.mesh.mesh_paths);
+	mesh_path_tbl_expire(sdata, sdata->u.mesh.mpp_paths);
 }
 
 void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata)
 {
-	/* no need for locking during exit path */
-	mesh_table_free(rcu_dereference_protected(sdata->u.mesh.mesh_paths, 1),
-			true);
-	mesh_table_free(rcu_dereference_protected(sdata->u.mesh.mpp_paths, 1),
-			true);
+	mesh_table_free(sdata->u.mesh.mesh_paths);
+	mesh_table_free(sdata->u.mesh.mpp_paths);
 }