author		John W. Linville <linville@tuxdriver.com>	2011-05-16 14:55:42 -0400
committer	John W. Linville <linville@tuxdriver.com>	2011-05-16 19:32:19 -0400
commit		e00cf3b9eb7839b952e434a75bff6b99e47337ac (patch)
tree		ef583ab8ac09bf703026650d4bc7777e6a3864d3 /net/mac80211/mesh_pathtbl.c
parent		1a8218e96271790a07dd7065a2ef173e0f67e328 (diff)
parent		3b8ab88acaceb505aa06ef3bbf3a73b92470ae78 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next-2.6 into for-davem
Conflicts:
drivers/net/wireless/iwlwifi/iwl-agn-tx.c
net/mac80211/sta_info.h
Diffstat (limited to 'net/mac80211/mesh_pathtbl.c')
-rw-r--r--	net/mac80211/mesh_pathtbl.c	123
1 file changed, 87 insertions(+), 36 deletions(-)
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 35c715adaae2..83ce48e31913 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -40,6 +40,50 @@ static struct mesh_table *mesh_paths;
 static struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */
 
 int mesh_paths_generation;
+
+/* This lock will have the grow table function as writer and add / delete nodes
+ * as readers. When reading the table (i.e. doing lookups) we are well protected
+ * by RCU
+ */
+static DEFINE_RWLOCK(pathtbl_resize_lock);
+
+
+static struct mesh_table *mesh_table_alloc(int size_order)
+{
+        int i;
+        struct mesh_table *newtbl;
+
+        newtbl = kmalloc(sizeof(struct mesh_table), GFP_KERNEL);
+        if (!newtbl)
+                return NULL;
+
+        newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
+                        (1 << size_order), GFP_KERNEL);
+
+        if (!newtbl->hash_buckets) {
+                kfree(newtbl);
+                return NULL;
+        }
+
+        newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
+                        (1 << size_order), GFP_KERNEL);
+        if (!newtbl->hashwlock) {
+                kfree(newtbl->hash_buckets);
+                kfree(newtbl);
+                return NULL;
+        }
+
+        newtbl->size_order = size_order;
+        newtbl->hash_mask = (1 << size_order) - 1;
+        atomic_set(&newtbl->entries, 0);
+        get_random_bytes(&newtbl->hash_rnd,
+                        sizeof(newtbl->hash_rnd));
+        for (i = 0; i <= newtbl->hash_mask; i++)
+                spin_lock_init(&newtbl->hashwlock[i]);
+
+        return newtbl;
+}
+
 static void __mesh_table_free(struct mesh_table *tbl)
 {
         kfree(tbl->hash_buckets);
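The block added here defines the concurrency scheme the rest of the patch relies on: resizing takes pathtbl_resize_lock as the sole writer, add/delete take it for read so they exclude only the resizer, per-bucket hashwlock spinlocks serialize bucket updates, and plain lookups rely on RCU alone. A minimal sketch of how the paths nest, following the hunks below (the helper names lookup_sketch/add_sketch are illustrative, not part of the patch):

        /* Lookup: RCU only, no locks taken. */
        static struct mesh_path *lookup_sketch(struct mesh_table *tbl, u32 idx)
        {
                struct mesh_path *mpath = NULL;

                rcu_read_lock();
                /* ... hlist_for_each_entry_rcu() over tbl->hash_buckets[idx] ... */
                rcu_read_unlock();
                return mpath;
        }

        /* Add/delete: reader on the resize lock, writer on one bucket. */
        static void add_sketch(struct mesh_table *tbl, u32 idx)
        {
                read_lock_bh(&pathtbl_resize_lock);   /* exclude the grower only */
                spin_lock_bh(&tbl->hashwlock[idx]);   /* serialize this bucket   */
                /* ... hlist_add_head_rcu(...) ... */
                spin_unlock_bh(&tbl->hashwlock[idx]);
                read_unlock_bh(&pathtbl_resize_lock);
        }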
@@ -47,7 +91,7 @@ static void __mesh_table_free(struct mesh_table *tbl)
         kfree(tbl);
 }
 
-void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
+static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
 {
         struct hlist_head *mesh_hash;
         struct hlist_node *p, *q;
@@ -55,18 +99,18 @@ void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
 
         mesh_hash = tbl->hash_buckets;
         for (i = 0; i <= tbl->hash_mask; i++) {
-                spin_lock(&tbl->hashwlock[i]);
+                spin_lock_bh(&tbl->hashwlock[i]);
                 hlist_for_each_safe(p, q, &mesh_hash[i]) {
                         tbl->free_node(p, free_leafs);
                         atomic_dec(&tbl->entries);
                 }
-                spin_unlock(&tbl->hashwlock[i]);
+                spin_unlock_bh(&tbl->hashwlock[i]);
         }
         __mesh_table_free(tbl);
 }
 
 static int mesh_table_grow(struct mesh_table *oldtbl,
                         struct mesh_table *newtbl)
 {
         struct hlist_head *oldhash;
         struct hlist_node *p, *q;
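Every plain spin_lock()/read_lock() on these structures becomes a _bh variant in this patch. The likely rationale (not spelled out in the hunks themselves): the path table is also reached from mac80211's receive path, which runs in softirq context, and a softirq that interrupts a process-context lock holder on the same CPU and then spins on the same lock never makes progress. Disabling bottom halves for the critical section closes that window:

        /* Sketch of the hazard the _bh conversion prevents (assumes the
         * table is reachable from softirq/RX context; the function name
         * is illustrative, not from the patch). */
        static void free_bucket_sketch(struct mesh_table *tbl, int i)
        {
                spin_lock_bh(&tbl->hashwlock[i]); /* BHs off: an RX softirq
                                                   * cannot preempt us here
                                                   * and spin on this lock */
                /* ... empty the bucket ... */
                spin_unlock_bh(&tbl->hashwlock[i]);
        }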
@@ -76,7 +120,6 @@ static int mesh_table_grow(struct mesh_table *oldtbl,
                         < oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
                 return -EAGAIN;
 
-
         newtbl->free_node = oldtbl->free_node;
         newtbl->mean_chain_len = oldtbl->mean_chain_len;
         newtbl->copy_node = oldtbl->copy_node;
@@ -98,12 +141,14 @@ errcopy:
         return -ENOMEM;
 }
 
+static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
+                struct mesh_table *tbl)
+{
+        /* Use last four bytes of hw addr and interface index as hash index */
+        return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
+                & tbl->hash_mask;
+}
 
-/* This lock will have the grow table function as writer and add / delete nodes
- * as readers. When reading the table (i.e. doing lookups) we are well protected
- * by RCU
- */
-static DEFINE_RWLOCK(pathtbl_resize_lock);
 
 /**
  *
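mesh_table_hash() is relocated below the table helpers it serves. It keys on the last four bytes of the destination hardware address plus the interface index, salted with the per-table hash_rnd that mesh_table_alloc() draws from get_random_bytes(), so bucket placement is unpredictable and reshuffles on every resize. The return value is already masked, so callers can index buckets directly; a hedged usage sketch (helper name illustrative):

        static struct hlist_head *bucket_sketch(u8 *dst,
                        struct ieee80211_sub_if_data *sdata,
                        struct mesh_table *tbl)
        {
                /* mesh_table_hash() applies tbl->hash_mask itself */
                return &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
        }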
@@ -275,7 +320,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
         if (!new_node)
                 goto err_node_alloc;
 
-        read_lock(&pathtbl_resize_lock);
+        read_lock_bh(&pathtbl_resize_lock);
         memcpy(new_mpath->dst, dst, ETH_ALEN);
         new_mpath->sdata = sdata;
         new_mpath->flags = 0;
@@ -290,7 +335,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
         hash_idx = mesh_table_hash(dst, sdata, mesh_paths);
         bucket = &mesh_paths->hash_buckets[hash_idx];
 
-        spin_lock(&mesh_paths->hashwlock[hash_idx]);
+        spin_lock_bh(&mesh_paths->hashwlock[hash_idx]);
 
         err = -EEXIST;
         hlist_for_each_entry(node, n, bucket, list) {
@@ -306,8 +351,8 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
 
         mesh_paths_generation++;
 
-        spin_unlock(&mesh_paths->hashwlock[hash_idx]);
-        read_unlock(&pathtbl_resize_lock);
+        spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]);
+        read_unlock_bh(&pathtbl_resize_lock);
         if (grow) {
                 set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
                 ieee80211_queue_work(&local->hw, &sdata->work);
@@ -315,8 +360,8 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
         return 0;
 
 err_exists:
-        spin_unlock(&mesh_paths->hashwlock[hash_idx]);
-        read_unlock(&pathtbl_resize_lock);
+        spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]);
+        read_unlock_bh(&pathtbl_resize_lock);
         kfree(new_node);
 err_node_alloc:
         kfree(new_mpath);
@@ -329,18 +374,21 @@ void mesh_mpath_table_grow(void)
 {
         struct mesh_table *oldtbl, *newtbl;
 
-        newtbl = mesh_table_alloc(mesh_paths->size_order + 1);
+        rcu_read_lock();
+        newtbl = mesh_table_alloc(rcu_dereference(mesh_paths)->size_order + 1);
         if (!newtbl)
                 return;
-        write_lock(&pathtbl_resize_lock);
+        write_lock_bh(&pathtbl_resize_lock);
         oldtbl = mesh_paths;
         if (mesh_table_grow(mesh_paths, newtbl) < 0) {
+                rcu_read_unlock();
                 __mesh_table_free(newtbl);
-                write_unlock(&pathtbl_resize_lock);
+                write_unlock_bh(&pathtbl_resize_lock);
                 return;
         }
+        rcu_read_unlock();
         rcu_assign_pointer(mesh_paths, newtbl);
-        write_unlock(&pathtbl_resize_lock);
+        write_unlock_bh(&pathtbl_resize_lock);
 
         synchronize_rcu();
         mesh_table_free(oldtbl, false);
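The grow path is the classic RCU replace idiom: build the larger table privately, publish it atomically, then wait for all pre-existing readers to finish before reclaiming the old one. Reduced to its shape (identifiers generic; 'tbl' stands in for mesh_paths/mpp_paths, error handling trimmed):

        newtbl = /* mesh_table_alloc() + mesh_table_grow(): private copy */;
        write_lock_bh(&pathtbl_resize_lock);  /* exclude add/delete          */
        rcu_assign_pointer(tbl, newtbl);      /* publish in one step         */
        write_unlock_bh(&pathtbl_resize_lock);
        synchronize_rcu();                    /* drain readers of the old tbl */
        mesh_table_free(oldtbl, false);       /* free node wrappers only; the
                                               * mesh paths themselves now
                                               * belong to the new table     */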
@@ -350,18 +398,21 @@ void mesh_mpp_table_grow(void)
 {
         struct mesh_table *oldtbl, *newtbl;
 
-        newtbl = mesh_table_alloc(mpp_paths->size_order + 1);
+        rcu_read_lock();
+        newtbl = mesh_table_alloc(rcu_dereference(mpp_paths)->size_order + 1);
         if (!newtbl)
                 return;
-        write_lock(&pathtbl_resize_lock);
+        write_lock_bh(&pathtbl_resize_lock);
         oldtbl = mpp_paths;
         if (mesh_table_grow(mpp_paths, newtbl) < 0) {
+                rcu_read_unlock();
                 __mesh_table_free(newtbl);
-                write_unlock(&pathtbl_resize_lock);
+                write_unlock_bh(&pathtbl_resize_lock);
                 return;
         }
+        rcu_read_unlock();
         rcu_assign_pointer(mpp_paths, newtbl);
-        write_unlock(&pathtbl_resize_lock);
+        write_unlock_bh(&pathtbl_resize_lock);
 
         synchronize_rcu();
         mesh_table_free(oldtbl, false);
@@ -395,7 +446,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
         if (!new_node)
                 goto err_node_alloc;
 
-        read_lock(&pathtbl_resize_lock);
+        read_lock_bh(&pathtbl_resize_lock);
         memcpy(new_mpath->dst, dst, ETH_ALEN);
         memcpy(new_mpath->mpp, mpp, ETH_ALEN);
         new_mpath->sdata = sdata;
@@ -408,7 +459,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
         hash_idx = mesh_table_hash(dst, sdata, mpp_paths);
         bucket = &mpp_paths->hash_buckets[hash_idx];
 
-        spin_lock(&mpp_paths->hashwlock[hash_idx]);
+        spin_lock_bh(&mpp_paths->hashwlock[hash_idx]);
 
         err = -EEXIST;
         hlist_for_each_entry(node, n, bucket, list) {
@@ -422,8 +473,8 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
             mpp_paths->mean_chain_len * (mpp_paths->hash_mask + 1))
                 grow = 1;
 
-        spin_unlock(&mpp_paths->hashwlock[hash_idx]);
-        read_unlock(&pathtbl_resize_lock);
+        spin_unlock_bh(&mpp_paths->hashwlock[hash_idx]);
+        read_unlock_bh(&pathtbl_resize_lock);
         if (grow) {
                 set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
                 ieee80211_queue_work(&local->hw, &sdata->work);
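Both add paths share this growth heuristic: the first half of the comparison sits just above the hunk, but the shape is that once the entry count crosses mean_chain_len times the bucket count, the add path only sets a work flag and lets the workqueue perform the resize, keeping the hot path cheap. A worked example with assumed values (the mean_chain_len default is not shown in this diff):

        int size_order = 3;                        /* 1 << 3 = 8 buckets      */
        int buckets = 1 << size_order;             /* == hash_mask + 1        */
        int mean_chain_len = 2;                    /* assumed default         */
        int threshold = mean_chain_len * buckets;  /* grow once past 16 entries */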
@@ -431,8 +482,8 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
         return 0;
 
 err_exists:
-        spin_unlock(&mpp_paths->hashwlock[hash_idx]);
-        read_unlock(&pathtbl_resize_lock);
+        spin_unlock_bh(&mpp_paths->hashwlock[hash_idx]);
+        read_unlock_bh(&pathtbl_resize_lock);
         kfree(new_node);
 err_node_alloc:
         kfree(new_mpath);
@@ -545,11 +596,11 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
         int hash_idx;
         int err = 0;
 
-        read_lock(&pathtbl_resize_lock);
+        read_lock_bh(&pathtbl_resize_lock);
         hash_idx = mesh_table_hash(addr, sdata, mesh_paths);
         bucket = &mesh_paths->hash_buckets[hash_idx];
 
-        spin_lock(&mesh_paths->hashwlock[hash_idx]);
+        spin_lock_bh(&mesh_paths->hashwlock[hash_idx]);
         hlist_for_each_entry(node, n, bucket, list) {
                 mpath = node->mpath;
                 if (mpath->sdata == sdata &&
@@ -567,8 +618,8 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
         err = -ENXIO;
 enddel:
         mesh_paths_generation++;
-        spin_unlock(&mesh_paths->hashwlock[hash_idx]);
-        read_unlock(&pathtbl_resize_lock);
+        spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]);
+        read_unlock_bh(&pathtbl_resize_lock);
         return err;
 }
 
@@ -720,7 +771,7 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
         struct hlist_node *p;
         int i;
 
-        read_lock(&pathtbl_resize_lock);
+        read_lock_bh(&pathtbl_resize_lock);
         for_each_mesh_entry(mesh_paths, p, node, i) {
                 if (node->mpath->sdata != sdata)
                         continue;
@@ -735,7 +786,7 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
                 } else
                         spin_unlock_bh(&mpath->state_lock);
         }
-        read_unlock(&pathtbl_resize_lock);
+        read_unlock_bh(&pathtbl_resize_lock);
 }
 
 void mesh_pathtbl_unregister(void)