aboutsummaryrefslogtreecommitdiffstats
path: root/net/mac80211/mesh_pathtbl.c
diff options
context:
space:
mode:
authorcozybit Inc <javier@cozybit.com>2011-04-13 14:10:28 -0400
committerJohn W. Linville <linville@tuxdriver.com>2011-04-13 15:24:48 -0400
commita3e6b12c0232748658a602eda39f12fddb254ba8 (patch)
tree6e03e47f41a6c1e9ede9abc05741226235344a3f /net/mac80211/mesh_pathtbl.c
parent09d5b94d2cbc6c3ebb70a9a318f6390d0b4cf010 (diff)
mac80211: Allocate new mesh path and portal tables before taking locks
It is unnecessary to hold the path table resize lock while allocating a new table. Allocate first and take lock later. This resolves a soft-lockup: [ 293.385799] BUG: soft lockup - CPU#0 stuck for 61s! [kworker/u:3:744] (...) [ 293.386049] Call Trace: [ 293.386049] [<c119fd04>] do_raw_read_lock+0x26/0x29 [ 293.386049] [<c14b2982>] _raw_read_lock+0x8/0xa [ 293.386049] [<c148c178>] mesh_path_add+0xb7/0x24e [ 293.386049] [<c148b98d>] ? mesh_path_lookup+0x1b/0xa6 [ 293.386049] [<c148ded5>] hwmp_route_info_get+0x276/0x2fd [ 293.386049] [<c148dfb6>] mesh_rx_path_sel_frame+0x5a/0x5d9 [ 293.386049] [<c102667d>] ? update_curr+0x1cf/0x1d7 [ 293.386049] [<c148b45a>] ieee80211_mesh_rx_queued_mgmt+0x60/0x67 [ 293.386049] [<c147c374>] ieee80211_iface_work+0x1f0/0x258 (...) Signed-off-by: Javier Cardona <javier@cozybit.com> Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'net/mac80211/mesh_pathtbl.c')
-rw-r--r-- net/mac80211/mesh_pathtbl.c | 49
1 file changed, 25 insertions(+), 24 deletions(-)
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 8d65b47d9837..7776ae5a8f15 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -65,42 +65,37 @@ void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
65 __mesh_table_free(tbl); 65 __mesh_table_free(tbl);
66} 66}
67 67
68static struct mesh_table *mesh_table_grow(struct mesh_table *tbl) 68static int mesh_table_grow(struct mesh_table *oldtbl,
69 struct mesh_table *newtbl)
69{ 70{
70 struct mesh_table *newtbl;
71 struct hlist_head *oldhash; 71 struct hlist_head *oldhash;
72 struct hlist_node *p, *q; 72 struct hlist_node *p, *q;
73 int i; 73 int i;
74 74
75 if (atomic_read(&tbl->entries) 75 if (atomic_read(&oldtbl->entries)
76 < tbl->mean_chain_len * (tbl->hash_mask + 1)) 76 < oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
77 goto endgrow; 77 return -EAGAIN;
78 78
79 newtbl = mesh_table_alloc(tbl->size_order + 1);
80 if (!newtbl)
81 goto endgrow;
82 79
83 newtbl->free_node = tbl->free_node; 80 newtbl->free_node = oldtbl->free_node;
84 newtbl->mean_chain_len = tbl->mean_chain_len; 81 newtbl->mean_chain_len = oldtbl->mean_chain_len;
85 newtbl->copy_node = tbl->copy_node; 82 newtbl->copy_node = oldtbl->copy_node;
86 atomic_set(&newtbl->entries, atomic_read(&tbl->entries)); 83 atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));
87 84
88 oldhash = tbl->hash_buckets; 85 oldhash = oldtbl->hash_buckets;
89 for (i = 0; i <= tbl->hash_mask; i++) 86 for (i = 0; i <= oldtbl->hash_mask; i++)
90 hlist_for_each(p, &oldhash[i]) 87 hlist_for_each(p, &oldhash[i])
91 if (tbl->copy_node(p, newtbl) < 0) 88 if (oldtbl->copy_node(p, newtbl) < 0)
92 goto errcopy; 89 goto errcopy;
93 90
94 return newtbl; 91 return 0;
95 92
96errcopy: 93errcopy:
97 for (i = 0; i <= newtbl->hash_mask; i++) { 94 for (i = 0; i <= newtbl->hash_mask; i++) {
98 hlist_for_each_safe(p, q, &newtbl->hash_buckets[i]) 95 hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
99 tbl->free_node(p, 0); 96 oldtbl->free_node(p, 0);
100 } 97 }
101 __mesh_table_free(newtbl); 98 return -ENOMEM;
102endgrow:
103 return NULL;
104} 99}
105 100
106 101
@@ -334,10 +329,13 @@ void mesh_mpath_table_grow(void)
334{ 329{
335 struct mesh_table *oldtbl, *newtbl; 330 struct mesh_table *oldtbl, *newtbl;
336 331
332 newtbl = mesh_table_alloc(mesh_paths->size_order + 1);
333 if (!newtbl)
334 return;
337 write_lock(&pathtbl_resize_lock); 335 write_lock(&pathtbl_resize_lock);
338 oldtbl = mesh_paths; 336 oldtbl = mesh_paths;
339 newtbl = mesh_table_grow(mesh_paths); 337 if (mesh_table_grow(mesh_paths, newtbl) < 0) {
340 if (!newtbl) { 338 __mesh_table_free(newtbl);
341 write_unlock(&pathtbl_resize_lock); 339 write_unlock(&pathtbl_resize_lock);
342 return; 340 return;
343 } 341 }
@@ -352,10 +350,13 @@ void mesh_mpp_table_grow(void)
352{ 350{
353 struct mesh_table *oldtbl, *newtbl; 351 struct mesh_table *oldtbl, *newtbl;
354 352
353 newtbl = mesh_table_alloc(mpp_paths->size_order + 1);
354 if (!newtbl)
355 return;
355 write_lock(&pathtbl_resize_lock); 356 write_lock(&pathtbl_resize_lock);
356 oldtbl = mpp_paths; 357 oldtbl = mpp_paths;
357 newtbl = mesh_table_grow(mpp_paths); 358 if (mesh_table_grow(mpp_paths, newtbl) < 0) {
358 if (!newtbl) { 359 __mesh_table_free(newtbl);
359 write_unlock(&pathtbl_resize_lock); 360 write_unlock(&pathtbl_resize_lock);
360 return; 361 return;
361 } 362 }