author		Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
committer	Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
commit		c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree		ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /net/mac80211/mesh_pathtbl.c
parent		ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent		6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'net/mac80211/mesh_pathtbl.c')
 -rw-r--r--  net/mac80211/mesh_pathtbl.c | 317
 1 file changed, 211 insertions(+), 106 deletions(-)
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 349e466cf08b..0d2faacc3e87 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -36,10 +36,77 @@ struct mpath_node {
 	struct mesh_path *mpath;
 };
 
-static struct mesh_table *mesh_paths;
-static struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */
+static struct mesh_table __rcu *mesh_paths;
+static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */
 
 int mesh_paths_generation;
+
+/* This lock will have the grow table function as writer and add / delete nodes
+ * as readers. When reading the table (i.e. doing lookups) we are well protected
+ * by RCU
+ */
+static DEFINE_RWLOCK(pathtbl_resize_lock);
+
+
+static inline struct mesh_table *resize_dereference_mesh_paths(void)
+{
+	return rcu_dereference_protected(mesh_paths,
+		lockdep_is_held(&pathtbl_resize_lock));
+}
+
+static inline struct mesh_table *resize_dereference_mpp_paths(void)
+{
+	return rcu_dereference_protected(mpp_paths,
+		lockdep_is_held(&pathtbl_resize_lock));
+}
+
+/*
+ * CAREFUL -- "tbl" must not be an expression,
+ * in particular not an rcu_dereference(), since
+ * it's used twice. So it is illegal to do
+ *	for_each_mesh_entry(rcu_dereference(...), ...)
+ */
+#define for_each_mesh_entry(tbl, p, node, i) \
+	for (i = 0; i <= tbl->hash_mask; i++) \
+		hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list)
+
+
+static struct mesh_table *mesh_table_alloc(int size_order)
+{
+	int i;
+	struct mesh_table *newtbl;
+
+	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
+	if (!newtbl)
+		return NULL;
+
+	newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
+			(1 << size_order), GFP_ATOMIC);
+
+	if (!newtbl->hash_buckets) {
+		kfree(newtbl);
+		return NULL;
+	}
+
+	newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
+			(1 << size_order), GFP_ATOMIC);
+	if (!newtbl->hashwlock) {
+		kfree(newtbl->hash_buckets);
+		kfree(newtbl);
+		return NULL;
+	}
+
+	newtbl->size_order = size_order;
+	newtbl->hash_mask = (1 << size_order) - 1;
+	atomic_set(&newtbl->entries, 0);
+	get_random_bytes(&newtbl->hash_rnd,
+			sizeof(newtbl->hash_rnd));
+	for (i = 0; i <= newtbl->hash_mask; i++)
+		spin_lock_init(&newtbl->hashwlock[i]);
+
+	return newtbl;
+}
+
 static void __mesh_table_free(struct mesh_table *tbl)
 {
 	kfree(tbl->hash_buckets);
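The hunk above introduces the concurrency pattern the rest of the patch builds on: the global table pointers become __rcu, lookups stay lockless under RCU, and pathtbl_resize_lock serializes all updaters. A minimal standalone sketch of that pattern, kernel-style; the struct and names here (my_table, my_table_ptr, my_resize_lock, resize_deref) are illustrative, not mac80211 code:

    #include <linux/rcupdate.h>
    #include <linux/spinlock.h>
    #include <linux/lockdep.h>

    struct my_table { int dummy; };

    /* Readers use rcu_dereference() under rcu_read_lock();
     * add/delete/grow paths hold my_resize_lock. */
    static struct my_table __rcu *my_table_ptr;
    static DEFINE_RWLOCK(my_resize_lock);

    /* Because updaters hold the lock, the lockdep- and sparse-checked
     * rcu_dereference_protected() is legal here and costs no barrier. */
    static inline struct my_table *resize_deref(void)
    {
            return rcu_dereference_protected(my_table_ptr,
                            lockdep_is_held(&my_resize_lock));
    }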
@@ -47,7 +114,7 @@ static void __mesh_table_free(struct mesh_table *tbl)
 	kfree(tbl);
 }
 
-void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
+static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
 {
 	struct hlist_head *mesh_hash;
 	struct hlist_node *p, *q;
@@ -55,60 +122,56 @@ void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
 
 	mesh_hash = tbl->hash_buckets;
 	for (i = 0; i <= tbl->hash_mask; i++) {
-		spin_lock(&tbl->hashwlock[i]);
+		spin_lock_bh(&tbl->hashwlock[i]);
 		hlist_for_each_safe(p, q, &mesh_hash[i]) {
 			tbl->free_node(p, free_leafs);
 			atomic_dec(&tbl->entries);
 		}
-		spin_unlock(&tbl->hashwlock[i]);
+		spin_unlock_bh(&tbl->hashwlock[i]);
 	}
 	__mesh_table_free(tbl);
 }
 
-static struct mesh_table *mesh_table_grow(struct mesh_table *tbl)
+static int mesh_table_grow(struct mesh_table *oldtbl,
+			   struct mesh_table *newtbl)
 {
-	struct mesh_table *newtbl;
 	struct hlist_head *oldhash;
 	struct hlist_node *p, *q;
 	int i;
 
-	if (atomic_read(&tbl->entries)
-			< tbl->mean_chain_len * (tbl->hash_mask + 1))
-		goto endgrow;
+	if (atomic_read(&oldtbl->entries)
+			< oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
+		return -EAGAIN;
 
-	newtbl = mesh_table_alloc(tbl->size_order + 1);
-	if (!newtbl)
-		goto endgrow;
+	newtbl->free_node = oldtbl->free_node;
+	newtbl->mean_chain_len = oldtbl->mean_chain_len;
+	newtbl->copy_node = oldtbl->copy_node;
+	atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));
 
-	newtbl->free_node = tbl->free_node;
-	newtbl->mean_chain_len = tbl->mean_chain_len;
-	newtbl->copy_node = tbl->copy_node;
-	atomic_set(&newtbl->entries, atomic_read(&tbl->entries));
-
-	oldhash = tbl->hash_buckets;
-	for (i = 0; i <= tbl->hash_mask; i++)
+	oldhash = oldtbl->hash_buckets;
+	for (i = 0; i <= oldtbl->hash_mask; i++)
 		hlist_for_each(p, &oldhash[i])
-			if (tbl->copy_node(p, newtbl) < 0)
+			if (oldtbl->copy_node(p, newtbl) < 0)
 				goto errcopy;
 
-	return newtbl;
+	return 0;
 
 errcopy:
 	for (i = 0; i <= newtbl->hash_mask; i++) {
 		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
-			tbl->free_node(p, 0);
+			oldtbl->free_node(p, 0);
 	}
-	__mesh_table_free(newtbl);
-endgrow:
-	return NULL;
+	return -ENOMEM;
 }
 
+static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
+			   struct mesh_table *tbl)
+{
+	/* Use last four bytes of hw addr and interface index as hash index */
+	return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
+		& tbl->hash_mask;
+}
 
-/* This lock will have the grow table function as writer and add / delete nodes
- * as readers. When reading the table (i.e. doing lookups) we are well protected
- * by RCU
- */
-static DEFINE_RWLOCK(pathtbl_resize_lock);
 
 /**
  *
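The "CAREFUL" comment exists because for_each_mesh_entry() expands its tbl argument twice. Passing rcu_dereference(...) directly would issue two independent dereferences, and a resize racing in between could hand the loop two different tables. That is why the later hunks all snapshot the pointer once before iterating; a sketch of the intended calling convention (loop body illustrative):

    rcu_read_lock();
    tbl = rcu_dereference(mesh_paths);      /* dereference exactly once */
    for_each_mesh_entry(tbl, p, node, i) {
            /* ... inspect node->mpath under RCU protection ... */
    }
    rcu_read_unlock();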
@@ -218,12 +281,13 @@ struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
  */
 struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
 {
+	struct mesh_table *tbl = rcu_dereference(mesh_paths);
 	struct mpath_node *node;
 	struct hlist_node *p;
 	int i;
 	int j = 0;
 
-	for_each_mesh_entry(mesh_paths, p, node, i) {
+	for_each_mesh_entry(tbl, p, node, i) {
 		if (sdata && node->mpath->sdata != sdata)
 			continue;
 		if (j++ == idx) {
@@ -253,6 +317,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 	struct ieee80211_local *local = sdata->local;
+	struct mesh_table *tbl;
 	struct mesh_path *mpath, *new_mpath;
 	struct mpath_node *node, *new_node;
 	struct hlist_head *bucket;
@@ -280,7 +345,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
 	if (!new_node)
 		goto err_node_alloc;
 
-	read_lock(&pathtbl_resize_lock);
+	read_lock_bh(&pathtbl_resize_lock);
 	memcpy(new_mpath->dst, dst, ETH_ALEN);
 	new_mpath->sdata = sdata;
 	new_mpath->flags = 0;
@@ -292,10 +357,12 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
 	spin_lock_init(&new_mpath->state_lock);
 	init_timer(&new_mpath->timer);
 
-	hash_idx = mesh_table_hash(dst, sdata, mesh_paths);
-	bucket = &mesh_paths->hash_buckets[hash_idx];
+	tbl = resize_dereference_mesh_paths();
 
-	spin_lock(&mesh_paths->hashwlock[hash_idx]);
+	hash_idx = mesh_table_hash(dst, sdata, tbl);
+	bucket = &tbl->hash_buckets[hash_idx];
+
+	spin_lock_bh(&tbl->hashwlock[hash_idx]);
 
 	err = -EEXIST;
 	hlist_for_each_entry(node, n, bucket, list) {
@@ -305,14 +372,14 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
 	}
 
 	hlist_add_head_rcu(&new_node->list, bucket);
-	if (atomic_inc_return(&mesh_paths->entries) >=
-		mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1))
+	if (atomic_inc_return(&tbl->entries) >=
+		tbl->mean_chain_len * (tbl->hash_mask + 1))
 		grow = 1;
 
 	mesh_paths_generation++;
 
-	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
-	read_unlock(&pathtbl_resize_lock);
+	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
+	read_unlock_bh(&pathtbl_resize_lock);
 	if (grow) {
 		set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
 		ieee80211_queue_work(&local->hw, &sdata->work);
@@ -320,8 +387,8 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
 	return 0;
 
 err_exists:
-	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
-	read_unlock(&pathtbl_resize_lock);
+	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
+	read_unlock_bh(&pathtbl_resize_lock);
 	kfree(new_node);
 err_node_alloc:
 	kfree(new_mpath);
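Throughout mesh_path_add() the plain lock calls become _bh variants. The usual motivation for such a conversion, and presumably the one here, is that the same locks are also taken from softirq context (the packet RX path): a process-context holder of a bucket lock could be interrupted on its own CPU by a softirq that then spins on that very lock, a classic single-CPU deadlock. Condensed to a sketch:

    /* Hold bucket locks with bottom halves disabled on this CPU. */
    spin_lock_bh(&tbl->hashwlock[hash_idx]);
    hlist_add_head_rcu(&new_node->list, bucket);    /* publish to RCU readers */
    spin_unlock_bh(&tbl->hashwlock[hash_idx]);      /* re-enable BHs */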
@@ -330,46 +397,59 @@ err_path_alloc:
 	return err;
 }
 
+static void mesh_table_free_rcu(struct rcu_head *rcu)
+{
+	struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);
+
+	mesh_table_free(tbl, false);
+}
+
 void mesh_mpath_table_grow(void)
 {
 	struct mesh_table *oldtbl, *newtbl;
 
-	write_lock(&pathtbl_resize_lock);
-	oldtbl = mesh_paths;
-	newtbl = mesh_table_grow(mesh_paths);
-	if (!newtbl) {
-		write_unlock(&pathtbl_resize_lock);
-		return;
+	write_lock_bh(&pathtbl_resize_lock);
+	oldtbl = resize_dereference_mesh_paths();
+	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
+	if (!newtbl)
+		goto out;
+	if (mesh_table_grow(oldtbl, newtbl) < 0) {
+		__mesh_table_free(newtbl);
+		goto out;
 	}
 	rcu_assign_pointer(mesh_paths, newtbl);
-	write_unlock(&pathtbl_resize_lock);
 
-	synchronize_rcu();
-	mesh_table_free(oldtbl, false);
+	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);
+
+out:
+	write_unlock_bh(&pathtbl_resize_lock);
 }
 
 void mesh_mpp_table_grow(void)
 {
 	struct mesh_table *oldtbl, *newtbl;
 
-	write_lock(&pathtbl_resize_lock);
-	oldtbl = mpp_paths;
-	newtbl = mesh_table_grow(mpp_paths);
-	if (!newtbl) {
-		write_unlock(&pathtbl_resize_lock);
-		return;
+	write_lock_bh(&pathtbl_resize_lock);
+	oldtbl = resize_dereference_mpp_paths();
+	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
+	if (!newtbl)
+		goto out;
+	if (mesh_table_grow(oldtbl, newtbl) < 0) {
+		__mesh_table_free(newtbl);
+		goto out;
 	}
 	rcu_assign_pointer(mpp_paths, newtbl);
-	write_unlock(&pathtbl_resize_lock);
+	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);
 
-	synchronize_rcu();
-	mesh_table_free(oldtbl, false);
+out:
+	write_unlock_bh(&pathtbl_resize_lock);
 }
 
 int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 	struct ieee80211_local *local = sdata->local;
+	struct mesh_table *tbl;
 	struct mesh_path *mpath, *new_mpath;
 	struct mpath_node *node, *new_node;
 	struct hlist_head *bucket;
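Replacing synchronize_rcu() with call_rcu() matters in these grow functions: the old table is now retired inside the write-side critical section (the unlock moved below the out: label), and synchronize_rcu() may sleep, which is forbidden while holding a _bh write lock. call_rcu() instead queues a callback that fires once every pre-existing reader has finished. The generic deferred-free pattern, with illustrative names (my_obj, my_obj_free_rcu, old_obj):

    struct my_obj {
            int payload;
            struct rcu_head rcu_head;       /* storage for the RCU callback */
    };

    static void my_obj_free_rcu(struct rcu_head *rcu)
    {
            struct my_obj *obj = container_of(rcu, struct my_obj, rcu_head);

            kfree(obj);     /* no RCU reader can still see obj here */
    }

    /* Update side, after rcu_assign_pointer() has published the
     * replacement: call_rcu() never blocks, so it is safe under
     * spinlocks and _bh locks. */
    call_rcu(&old_obj->rcu_head, my_obj_free_rcu);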
@@ -394,7 +474,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
 	if (!new_node)
 		goto err_node_alloc;
 
-	read_lock(&pathtbl_resize_lock);
+	read_lock_bh(&pathtbl_resize_lock);
 	memcpy(new_mpath->dst, dst, ETH_ALEN);
 	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
 	new_mpath->sdata = sdata;
@@ -404,10 +484,12 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
 	new_mpath->exp_time = jiffies;
 	spin_lock_init(&new_mpath->state_lock);
 
-	hash_idx = mesh_table_hash(dst, sdata, mpp_paths);
-	bucket = &mpp_paths->hash_buckets[hash_idx];
+	tbl = resize_dereference_mpp_paths();
 
-	spin_lock(&mpp_paths->hashwlock[hash_idx]);
+	hash_idx = mesh_table_hash(dst, sdata, tbl);
+	bucket = &tbl->hash_buckets[hash_idx];
+
+	spin_lock_bh(&tbl->hashwlock[hash_idx]);
 
 	err = -EEXIST;
 	hlist_for_each_entry(node, n, bucket, list) {
@@ -417,12 +499,12 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
 	}
 
 	hlist_add_head_rcu(&new_node->list, bucket);
-	if (atomic_inc_return(&mpp_paths->entries) >=
-		mpp_paths->mean_chain_len * (mpp_paths->hash_mask + 1))
+	if (atomic_inc_return(&tbl->entries) >=
+		tbl->mean_chain_len * (tbl->hash_mask + 1))
 		grow = 1;
 
-	spin_unlock(&mpp_paths->hashwlock[hash_idx]);
-	read_unlock(&pathtbl_resize_lock);
+	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
+	read_unlock_bh(&pathtbl_resize_lock);
 	if (grow) {
 		set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
 		ieee80211_queue_work(&local->hw, &sdata->work);
@@ -430,8 +512,8 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
 	return 0;
 
 err_exists:
-	spin_unlock(&mpp_paths->hashwlock[hash_idx]);
-	read_unlock(&pathtbl_resize_lock);
+	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
+	read_unlock_bh(&pathtbl_resize_lock);
 	kfree(new_node);
 err_node_alloc:
 	kfree(new_mpath);
@@ -450,6 +532,7 @@ err_path_alloc:
  */
 void mesh_plink_broken(struct sta_info *sta)
 {
+	struct mesh_table *tbl;
 	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 	struct mesh_path *mpath;
 	struct mpath_node *node;
@@ -458,17 +541,18 @@ void mesh_plink_broken(struct sta_info *sta)
 	int i;
 
 	rcu_read_lock();
-	for_each_mesh_entry(mesh_paths, p, node, i) {
+	tbl = rcu_dereference(mesh_paths);
+	for_each_mesh_entry(tbl, p, node, i) {
 		mpath = node->mpath;
 		spin_lock_bh(&mpath->state_lock);
-		if (mpath->next_hop == sta &&
+		if (rcu_dereference(mpath->next_hop) == sta &&
 		    mpath->flags & MESH_PATH_ACTIVE &&
 		    !(mpath->flags & MESH_PATH_FIXED)) {
 			mpath->flags &= ~MESH_PATH_ACTIVE;
 			++mpath->sn;
 			spin_unlock_bh(&mpath->state_lock);
-			mesh_path_error_tx(MESH_TTL, mpath->dst,
-				cpu_to_le32(mpath->sn),
+			mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl,
+				mpath->dst, cpu_to_le32(mpath->sn),
 				cpu_to_le16(PERR_RCODE_DEST_UNREACH),
 				bcast, sdata);
 		} else
@@ -490,30 +574,38 @@ void mesh_plink_broken(struct sta_info *sta)
  */
 void mesh_path_flush_by_nexthop(struct sta_info *sta)
 {
+	struct mesh_table *tbl;
 	struct mesh_path *mpath;
 	struct mpath_node *node;
 	struct hlist_node *p;
 	int i;
 
-	for_each_mesh_entry(mesh_paths, p, node, i) {
+	rcu_read_lock();
+	tbl = rcu_dereference(mesh_paths);
+	for_each_mesh_entry(tbl, p, node, i) {
 		mpath = node->mpath;
-		if (mpath->next_hop == sta)
+		if (rcu_dereference(mpath->next_hop) == sta)
 			mesh_path_del(mpath->dst, mpath->sdata);
 	}
+	rcu_read_unlock();
 }
 
 void mesh_path_flush(struct ieee80211_sub_if_data *sdata)
 {
+	struct mesh_table *tbl;
 	struct mesh_path *mpath;
 	struct mpath_node *node;
 	struct hlist_node *p;
 	int i;
 
-	for_each_mesh_entry(mesh_paths, p, node, i) {
+	rcu_read_lock();
+	tbl = rcu_dereference(mesh_paths);
+	for_each_mesh_entry(tbl, p, node, i) {
 		mpath = node->mpath;
 		if (mpath->sdata == sdata)
 			mesh_path_del(mpath->dst, mpath->sdata);
 	}
+	rcu_read_unlock();
 }
 
 static void mesh_path_node_reclaim(struct rcu_head *rp)
@@ -537,6 +629,7 @@ static void mesh_path_node_reclaim(struct rcu_head *rp)
  */
 int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
 {
+	struct mesh_table *tbl;
 	struct mesh_path *mpath;
 	struct mpath_node *node;
 	struct hlist_head *bucket;
@@ -544,20 +637,21 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
 	int hash_idx;
 	int err = 0;
 
-	read_lock(&pathtbl_resize_lock);
-	hash_idx = mesh_table_hash(addr, sdata, mesh_paths);
-	bucket = &mesh_paths->hash_buckets[hash_idx];
+	read_lock_bh(&pathtbl_resize_lock);
+	tbl = resize_dereference_mesh_paths();
+	hash_idx = mesh_table_hash(addr, sdata, tbl);
+	bucket = &tbl->hash_buckets[hash_idx];
 
-	spin_lock(&mesh_paths->hashwlock[hash_idx]);
+	spin_lock_bh(&tbl->hashwlock[hash_idx]);
 	hlist_for_each_entry(node, n, bucket, list) {
 		mpath = node->mpath;
 		if (mpath->sdata == sdata &&
 		    memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
 			spin_lock_bh(&mpath->state_lock);
 			mpath->flags |= MESH_PATH_RESOLVING;
 			hlist_del_rcu(&node->list);
 			call_rcu(&node->rcu, mesh_path_node_reclaim);
-			atomic_dec(&mesh_paths->entries);
+			atomic_dec(&tbl->entries);
 			spin_unlock_bh(&mpath->state_lock);
 			goto enddel;
 		}
@@ -566,8 +660,8 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
 	err = -ENXIO;
 enddel:
 	mesh_paths_generation++;
-	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
-	read_unlock(&pathtbl_resize_lock);
+	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
+	read_unlock_bh(&pathtbl_resize_lock);
 	return err;
 }
 
@@ -614,7 +708,8 @@ void mesh_path_discard_frame(struct sk_buff *skb,
 	mpath = mesh_path_lookup(da, sdata);
 	if (mpath)
 		sn = ++mpath->sn;
-	mesh_path_error_tx(MESH_TTL, skb->data, cpu_to_le32(sn),
+	mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, skb->data,
+			   cpu_to_le32(sn),
 			cpu_to_le16(PERR_RCODE_NO_ROUTE), ra, sdata);
 }
 
@@ -627,7 +722,7 @@ void mesh_path_discard_frame(struct sk_buff *skb,
  *
  * @mpath: mesh path whose queue has to be freed
  *
- * Locking: the function must me called withing a rcu_read_lock region
+ * Locking: the function must me called within a rcu_read_lock region
  */
 void mesh_path_flush_pending(struct mesh_path *mpath)
 {
@@ -666,8 +761,10 @@ static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
 	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);
 	mpath = node->mpath;
 	hlist_del_rcu(p);
-	if (free_leafs)
+	if (free_leafs) {
+		del_timer_sync(&mpath->timer);
 		kfree(mpath);
+	}
 	kfree(node);
 }
 
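The del_timer_sync() added above closes a use-after-free: kfree()ing an mpath whose timer handler is still running would leave that handler with a dangling pointer. del_timer_sync() deactivates the timer and waits for a handler in flight on another CPU to finish. The rule in general form (obj is an illustrative name):

    /* Never free a structure with an embedded timer without first
     * synchronously stopping it. */
    del_timer_sync(&obj->timer);    /* waits for a running handler */
    kfree(obj);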
@@ -692,52 +789,60 @@ static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
 
 int mesh_pathtbl_init(void)
 {
-	mesh_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
-	if (!mesh_paths)
+	struct mesh_table *tbl_path, *tbl_mpp;
+
+	tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
+	if (!tbl_path)
 		return -ENOMEM;
-	mesh_paths->free_node = &mesh_path_node_free;
-	mesh_paths->copy_node = &mesh_path_node_copy;
-	mesh_paths->mean_chain_len = MEAN_CHAIN_LEN;
+	tbl_path->free_node = &mesh_path_node_free;
+	tbl_path->copy_node = &mesh_path_node_copy;
+	tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
 
-	mpp_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
-	if (!mpp_paths) {
-		mesh_table_free(mesh_paths, true);
+	tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
+	if (!tbl_mpp) {
+		mesh_table_free(tbl_path, true);
 		return -ENOMEM;
 	}
-	mpp_paths->free_node = &mesh_path_node_free;
-	mpp_paths->copy_node = &mesh_path_node_copy;
-	mpp_paths->mean_chain_len = MEAN_CHAIN_LEN;
+	tbl_mpp->free_node = &mesh_path_node_free;
+	tbl_mpp->copy_node = &mesh_path_node_copy;
+	tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
+
+	/* Need no locking since this is during init */
+	RCU_INIT_POINTER(mesh_paths, tbl_path);
+	RCU_INIT_POINTER(mpp_paths, tbl_mpp);
 
 	return 0;
 }
 
 void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
 {
+	struct mesh_table *tbl;
 	struct mesh_path *mpath;
 	struct mpath_node *node;
 	struct hlist_node *p;
 	int i;
 
-	read_lock(&pathtbl_resize_lock);
-	for_each_mesh_entry(mesh_paths, p, node, i) {
+	rcu_read_lock();
+	tbl = rcu_dereference(mesh_paths);
+	for_each_mesh_entry(tbl, p, node, i) {
 		if (node->mpath->sdata != sdata)
 			continue;
 		mpath = node->mpath;
 		spin_lock_bh(&mpath->state_lock);
 		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
 		    (!(mpath->flags & MESH_PATH_FIXED)) &&
-		    time_after(jiffies,
-			       mpath->exp_time + MESH_PATH_EXPIRE)) {
+		    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) {
 			spin_unlock_bh(&mpath->state_lock);
 			mesh_path_del(mpath->dst, mpath->sdata);
 		} else
 			spin_unlock_bh(&mpath->state_lock);
 	}
-	read_unlock(&pathtbl_resize_lock);
+	rcu_read_unlock();
 }
 
 void mesh_pathtbl_unregister(void)
 {
-	mesh_table_free(mesh_paths, true);
-	mesh_table_free(mpp_paths, true);
+	/* no need for locking during exit path */
+	mesh_table_free(rcu_dereference_raw(mesh_paths), true);
+	mesh_table_free(rcu_dereference_raw(mpp_paths), true);
}
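The init and unregister hunks round out the __rcu annotations. At init time no reader can see the pointers yet, so RCU_INIT_POINTER() suffices; it skips the memory barrier that rcu_assign_pointer() uses to order object setup before publication. Symmetrically, rcu_dereference_raw() at unregister is legal because no readers remain on the exit path. A contrast sketch with illustrative context:

    /* Hot path, readers active: the barrier in rcu_assign_pointer()
     * guarantees readers never see a half-initialized table. */
    rcu_assign_pointer(mesh_paths, newtbl);

    /* Init/exit, provably no concurrent readers: barrier-free forms. */
    RCU_INIT_POINTER(mesh_paths, tbl_path);
    tbl = rcu_dereference_raw(mesh_paths);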