author		Pavel Emelyanov <xemul@openvz.org>	2008-05-06 10:53:43 -0400
committer	John W. Linville <linville@tuxdriver.com>	2008-05-21 21:47:41 -0400
commit		402d7752ed253369b7ab037e2d778e52d59c19ed
tree		c15299b42e01f86271192b0789a646d52834096e
parent		b679aeb304e3070626750c15e043a40da0e942fc
mac80211: Brush up error paths in mesh_path_add.
There are already three error paths that do incremental rollbacks, so
merge them together, rename the labels, and format the code to look a
bit nicer.
(I do not mind dropping/delaying this patch however).
Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
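For readers less familiar with the idiom, the patch collapses the three inline rollback sequences into a single chain of goto labels at the end of the function, ordered so that each label releases only what was acquired before the failing step. Below is a minimal stand-alone sketch of that unwinding pattern; the names (setup_example, buf_a, buf_b) are illustrative only and do not appear in the patch.

/*
 * Minimal sketch of the goto-based incremental rollback idiom the
 * patch adopts.  All names here are hypothetical; only the unwinding
 * structure mirrors the reworked mesh_path_add().
 */
#include <errno.h>
#include <stdlib.h>

static int setup_example(void)
{
	char *buf_a, *buf_b;
	int err;

	err = -ENOMEM;
	buf_a = malloc(64);
	if (!buf_a)
		goto err_alloc_a;	/* nothing acquired yet */

	buf_b = malloc(64);
	if (!buf_b)
		goto err_alloc_b;	/* roll back buf_a only */

	/* ... later failures would jump to labels further down ... */
	free(buf_b);
	free(buf_a);
	return 0;

err_alloc_b:
	free(buf_a);
err_alloc_a:
	return err;
}

int main(void)
{
	return setup_example() ? 1 : 0;
}

Each exit path falls through the labels below its own, so the rollback steps are written once and shared by every failure point.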
-rw-r--r--	net/mac80211/mesh_pathtbl.c	42
1 file changed, 20 insertions(+), 22 deletions(-)
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 99c2d360888e..7097ef981991 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -158,19 +158,14 @@ int mesh_path_add(u8 *dst, struct net_device *dev)
 	if (atomic_add_unless(&sdata->u.sta.mpaths, 1, MESH_MAX_MPATHS) == 0)
 		return -ENOSPC;
 
+	err = -ENOMEM;
 	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL);
-	if (!new_mpath) {
-		atomic_dec(&sdata->u.sta.mpaths);
-		err = -ENOMEM;
-		goto endadd2;
-	}
+	if (!new_mpath)
+		goto err_path_alloc;
+
 	new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
-	if (!new_node) {
-		kfree(new_mpath);
-		atomic_dec(&sdata->u.sta.mpaths);
-		err = -ENOMEM;
-		goto endadd2;
-	}
+	if (!new_node)
+		goto err_node_alloc;
 
 	read_lock(&pathtbl_resize_lock);
 	memcpy(new_mpath->dst, dst, ETH_ALEN);
@@ -189,16 +184,11 @@ int mesh_path_add(u8 *dst, struct net_device *dev)
 
 	spin_lock(&mesh_paths->hashwlock[hash_idx]);
 
+	err = -EEXIST;
 	hlist_for_each_entry(node, n, bucket, list) {
 		mpath = node->mpath;
-		if (mpath->dev == dev && memcmp(dst, mpath->dst, ETH_ALEN)
-				== 0) {
-			err = -EEXIST;
-			atomic_dec(&sdata->u.sta.mpaths);
-			kfree(new_node);
-			kfree(new_mpath);
-			goto endadd;
-		}
+		if (mpath->dev == dev && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
+			goto err_exists;
 	}
 
 	hlist_add_head_rcu(&new_node->list, bucket);
@@ -206,10 +196,9 @@ int mesh_path_add(u8 *dst, struct net_device *dev)
 		mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1))
 		grow = 1;
 
-endadd:
 	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
 	read_unlock(&pathtbl_resize_lock);
-	if (!err && grow) {
+	if (grow) {
 		struct mesh_table *oldtbl, *newtbl;
 
 		write_lock(&pathtbl_resize_lock);
@@ -225,7 +214,16 @@ endadd:
 		synchronize_rcu();
 		mesh_table_free(oldtbl, false);
 	}
-endadd2:
+	return 0;
+
+err_exists:
+	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
+	read_unlock(&pathtbl_resize_lock);
+	kfree(new_node);
+err_node_alloc:
+	kfree(new_mpath);
+err_path_alloc:
+	atomic_dec(&sdata->u.sta.mpaths);
 	return err;
 }
 