Diffstat (limited to 'net/mac80211/mesh_pathtbl.c')
-rw-r--r--	net/mac80211/mesh_pathtbl.c	211
1 file changed, 165 insertions(+), 46 deletions(-)
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 838ee60492ad..3c72557df45a 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -9,7 +9,6 @@
 
 #include <linux/etherdevice.h>
 #include <linux/list.h>
-#include <linux/netdevice.h>
 #include <linux/random.h>
 #include <linux/spinlock.h>
 #include <linux/string.h>
@@ -37,6 +36,7 @@ struct mpath_node {
 };
 
 static struct mesh_table *mesh_paths;
+static struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */
 
 /* This lock will have the grow table function as writer and add / delete nodes
  * as readers. When reading the table (i.e. doing lookups) we are well protected
@@ -62,13 +62,13 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
 /**
  * mesh_path_lookup - look up a path in the mesh path table
  * @dst: hardware address (ETH_ALEN length) of destination
- * @dev: local interface
+ * @sdata: local subif
  *
  * Returns: pointer to the mesh path structure, or NULL if not found
  *
  * Locking: must be called within a read rcu section.
  */
-struct mesh_path *mesh_path_lookup(u8 *dst, struct net_device *dev)
+struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
 {
 	struct mesh_path *mpath;
 	struct hlist_node *n;
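The "Locking" note above is the contract for every caller: the lookup and any dereference of the returned mesh_path must happen inside a single RCU read-side section. A minimal caller sketch, assuming a hypothetical forward_to() helper (only mesh_path_lookup(), the RCU API, and the MESH_PATH_ACTIVE flag are real symbols):

	/* Sketch: resolve and use a path entirely under rcu_read_lock(). */
	static void resolve_and_forward(u8 *dst, struct ieee80211_sub_if_data *sdata)
	{
		struct mesh_path *mpath;

		rcu_read_lock();
		mpath = mesh_path_lookup(dst, sdata);
		if (mpath && (mpath->flags & MESH_PATH_ACTIVE))
			forward_to(mpath->next_hop);	/* hypothetical; use before unlock */
		rcu_read_unlock();
	}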
@@ -78,10 +78,10 @@ struct mesh_path *mesh_path_lookup(u8 *dst, struct net_device *dev)
 
 	tbl = rcu_dereference(mesh_paths);
 
-	bucket = &tbl->hash_buckets[mesh_table_hash(dst, dev, tbl)];
+	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
 	hlist_for_each_entry_rcu(node, n, bucket, list) {
 		mpath = node->mpath;
-		if (mpath->dev == dev &&
+		if (mpath->sdata == sdata &&
 		    memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
 			if (MPATH_EXPIRED(mpath)) {
 				spin_lock_bh(&mpath->state_lock);
@@ -95,16 +95,44 @@ struct mesh_path *mesh_path_lookup(u8 *dst, struct net_device *dev)
 	return NULL;
 }
 
+struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
+{
+	struct mesh_path *mpath;
+	struct hlist_node *n;
+	struct hlist_head *bucket;
+	struct mesh_table *tbl;
+	struct mpath_node *node;
+
+	tbl = rcu_dereference(mpp_paths);
+
+	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
+	hlist_for_each_entry_rcu(node, n, bucket, list) {
+		mpath = node->mpath;
+		if (mpath->sdata == sdata &&
+		    memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
+			if (MPATH_EXPIRED(mpath)) {
+				spin_lock_bh(&mpath->state_lock);
+				if (MPATH_EXPIRED(mpath))
+					mpath->flags &= ~MESH_PATH_ACTIVE;
+				spin_unlock_bh(&mpath->state_lock);
+			}
+			return mpath;
+		}
+	}
+	return NULL;
+}
+
+
 /**
  * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
  * @idx: index
- * @dev: local interface, or NULL for all entries
+ * @sdata: local subif, or NULL for all entries
  *
  * Returns: pointer to the mesh path structure, or NULL if not found.
  *
  * Locking: must be called within a read rcu section.
  */
-struct mesh_path *mesh_path_lookup_by_idx(int idx, struct net_device *dev)
+struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
 {
 	struct mpath_node *node;
 	struct hlist_node *p;
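mpp_path_lookup() mirrors mesh_path_lookup() but searches the new mpp_paths table, which records which mesh proxy (MPP) answers for a destination outside the mesh. A hedged sketch of how a sender might combine the two tables (the caller shape is assumed, not part of this patch; only the two lookup functions and the mpp field are real):

	/* Sketch: proxied destinations are reached via the path to their MPP.
	 * Caller holds rcu_read_lock(). */
	static struct mesh_path *find_route(u8 *dst, struct ieee80211_sub_if_data *sdata)
	{
		struct mesh_path *mppath = mpp_path_lookup(dst, sdata);

		if (mppath)	/* dst is proxied: route towards the proxy instead */
			return mesh_path_lookup(mppath->mpp, sdata);
		return mesh_path_lookup(dst, sdata);
	}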
@@ -112,7 +140,7 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct net_device *dev)
 	int j = 0;
 
 	for_each_mesh_entry(mesh_paths, p, node, i) {
-		if (dev && node->mpath->dev != dev)
+		if (sdata && node->mpath->sdata != sdata)
 			continue;
 		if (j++ == idx) {
 			if (MPATH_EXPIRED(node->mpath)) {
@@ -131,15 +159,14 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct net_device *dev)
 /**
  * mesh_path_add - allocate and add a new path to the mesh path table
  * @addr: destination address of the path (ETH_ALEN length)
- * @dev: local interface
+ * @sdata: local subif
 *
  * Returns: 0 on success
 *
 * State: the initial state of the new path is set to 0
 */
-int mesh_path_add(u8 *dst, struct net_device *dev)
+int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
 {
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct mesh_path *mpath, *new_mpath;
 	struct mpath_node *node, *new_node;
 	struct hlist_head *bucket;
@@ -148,14 +175,14 @@ int mesh_path_add(u8 *dst, struct net_device *dev)
 	int err = 0;
 	u32 hash_idx;
 
-	if (memcmp(dst, dev->dev_addr, ETH_ALEN) == 0)
+	if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0)
 		/* never add ourselves as neighbours */
 		return -ENOTSUPP;
 
 	if (is_multicast_ether_addr(dst))
 		return -ENOTSUPP;
 
-	if (atomic_add_unless(&sdata->u.sta.mpaths, 1, MESH_MAX_MPATHS) == 0)
+	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
 		return -ENOSPC;
 
 	err = -ENOMEM;
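The atomic_add_unless() call above makes the per-interface path cap race-free: the comparison against MESH_MAX_MPATHS and the increment are one atomic operation, so two concurrent mesh_path_add() calls cannot both squeeze past a nearly-full limit. The idiom in isolation (sketch; take_slot() is illustrative, not a kernel symbol):

	/* Sketch: reserve one slot in a capped counter; -ENOSPC means full. */
	static int take_slot(atomic_t *count, int limit)
	{
		/* atomic_add_unless(v, a, u) adds a to *v unless *v == u,
		 * returning nonzero only when the add actually happened. */
		if (atomic_add_unless(count, 1, limit) == 0)
			return -ENOSPC;
		return 0;
	}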
@@ -169,7 +196,7 @@ int mesh_path_add(u8 *dst, struct net_device *dev)
 
 	read_lock(&pathtbl_resize_lock);
 	memcpy(new_mpath->dst, dst, ETH_ALEN);
-	new_mpath->dev = dev;
+	new_mpath->sdata = sdata;
 	new_mpath->flags = 0;
 	skb_queue_head_init(&new_mpath->frame_queue);
 	new_node->mpath = new_mpath;
@@ -179,7 +206,7 @@ int mesh_path_add(u8 *dst, struct net_device *dev)
 	spin_lock_init(&new_mpath->state_lock);
 	init_timer(&new_mpath->timer);
 
-	hash_idx = mesh_table_hash(dst, dev, mesh_paths);
+	hash_idx = mesh_table_hash(dst, sdata, mesh_paths);
 	bucket = &mesh_paths->hash_buckets[hash_idx];
 
 	spin_lock(&mesh_paths->hashwlock[hash_idx]);
@@ -187,7 +214,7 @@ int mesh_path_add(u8 *dst, struct net_device *dev)
 	err = -EEXIST;
 	hlist_for_each_entry(node, n, bucket, list) {
 		mpath = node->mpath;
-		if (mpath->dev == dev && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
+		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
 			goto err_exists;
 	}
 
@@ -223,7 +250,92 @@ err_exists:
 err_node_alloc:
 	kfree(new_mpath);
 err_path_alloc:
-	atomic_dec(&sdata->u.sta.mpaths);
+	atomic_dec(&sdata->u.mesh.mpaths);
+	return err;
+}
+
+
+int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
+{
+	struct mesh_path *mpath, *new_mpath;
+	struct mpath_node *node, *new_node;
+	struct hlist_head *bucket;
+	struct hlist_node *n;
+	int grow = 0;
+	int err = 0;
+	u32 hash_idx;
+
+
+	if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0)
+		/* never add ourselves as neighbours */
+		return -ENOTSUPP;
+
+	if (is_multicast_ether_addr(dst))
+		return -ENOTSUPP;
+
+	err = -ENOMEM;
+	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL);
+	if (!new_mpath)
+		goto err_path_alloc;
+
+	new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
+	if (!new_node)
+		goto err_node_alloc;
+
+	read_lock(&pathtbl_resize_lock);
+	memcpy(new_mpath->dst, dst, ETH_ALEN);
+	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
+	new_mpath->sdata = sdata;
+	new_mpath->flags = 0;
+	skb_queue_head_init(&new_mpath->frame_queue);
+	new_node->mpath = new_mpath;
+	new_mpath->exp_time = jiffies;
+	spin_lock_init(&new_mpath->state_lock);
+
+	hash_idx = mesh_table_hash(dst, sdata, mpp_paths);
+	bucket = &mpp_paths->hash_buckets[hash_idx];
+
+	spin_lock(&mpp_paths->hashwlock[hash_idx]);
+
+	err = -EEXIST;
+	hlist_for_each_entry(node, n, bucket, list) {
+		mpath = node->mpath;
+		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
+			goto err_exists;
+	}
+
+	hlist_add_head_rcu(&new_node->list, bucket);
+	if (atomic_inc_return(&mpp_paths->entries) >=
+		mpp_paths->mean_chain_len * (mpp_paths->hash_mask + 1))
+		grow = 1;
+
+	spin_unlock(&mpp_paths->hashwlock[hash_idx]);
+	read_unlock(&pathtbl_resize_lock);
+	if (grow) {
+		struct mesh_table *oldtbl, *newtbl;
+
+		write_lock(&pathtbl_resize_lock);
+		oldtbl = mpp_paths;
+		newtbl = mesh_table_grow(mpp_paths);
+		if (!newtbl) {
+			write_unlock(&pathtbl_resize_lock);
+			return 0;
+		}
+		rcu_assign_pointer(mpp_paths, newtbl);
+		write_unlock(&pathtbl_resize_lock);
+
+		synchronize_rcu();
+		mesh_table_free(oldtbl, false);
+	}
+	return 0;
+
+err_exists:
+	spin_unlock(&mpp_paths->hashwlock[hash_idx]);
+	read_unlock(&pathtbl_resize_lock);
+	kfree(new_node);
+err_node_alloc:
+	kfree(new_mpath);
+err_path_alloc:
 	return err;
 }
 
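The grow branch of mpp_path_add() is the standard RCU replace-then-reclaim sequence: build the larger table, publish it with rcu_assign_pointer(), wait out every pre-existing reader with synchronize_rcu(), and only then free the old table. Abstracted from the function above as a skeleton (same calls, condensed):

	/* Sketch: swap in a grown table without stalling RCU readers. */
	write_lock(&pathtbl_resize_lock);
	oldtbl = mpp_paths;
	newtbl = mesh_table_grow(mpp_paths);		/* copy nodes into a bigger table */
	if (newtbl)
		rcu_assign_pointer(mpp_paths, newtbl);	/* new lookups now see newtbl */
	write_unlock(&pathtbl_resize_lock);
	if (newtbl) {
		synchronize_rcu();			/* readers of oldtbl drain out */
		mesh_table_free(oldtbl, false);		/* false: keep the shared entries */
	}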
@@ -241,7 +353,7 @@ void mesh_plink_broken(struct sta_info *sta)
 	struct mesh_path *mpath;
 	struct mpath_node *node;
 	struct hlist_node *p;
-	struct net_device *dev = sta->sdata->dev;
+	struct ieee80211_sub_if_data *sdata = sta->sdata;
 	int i;
 
 	rcu_read_lock();
@@ -256,7 +368,7 @@ void mesh_plink_broken(struct sta_info *sta)
 			spin_unlock_bh(&mpath->state_lock);
 			mesh_path_error_tx(mpath->dst,
 					cpu_to_le32(mpath->dsn),
-					dev->broadcast, dev);
+					sdata->dev->broadcast, sdata);
 		} else
 			spin_unlock_bh(&mpath->state_lock);
 	}
@@ -284,11 +396,11 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
 	for_each_mesh_entry(mesh_paths, p, node, i) {
 		mpath = node->mpath;
 		if (mpath->next_hop == sta)
-			mesh_path_del(mpath->dst, mpath->dev);
+			mesh_path_del(mpath->dst, mpath->sdata);
 	}
 }
 
-void mesh_path_flush(struct net_device *dev)
+void mesh_path_flush(struct ieee80211_sub_if_data *sdata)
 {
 	struct mesh_path *mpath;
 	struct mpath_node *node;
@@ -297,19 +409,18 @@ void mesh_path_flush(struct net_device *dev)
 
 	for_each_mesh_entry(mesh_paths, p, node, i) {
 		mpath = node->mpath;
-		if (mpath->dev == dev)
-			mesh_path_del(mpath->dst, mpath->dev);
+		if (mpath->sdata == sdata)
+			mesh_path_del(mpath->dst, mpath->sdata);
 	}
 }
 
 static void mesh_path_node_reclaim(struct rcu_head *rp)
 {
 	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
-	struct ieee80211_sub_if_data *sdata =
-		IEEE80211_DEV_TO_SUB_IF(node->mpath->dev);
+	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
 
 	del_timer_sync(&node->mpath->timer);
-	atomic_dec(&sdata->u.sta.mpaths);
+	atomic_dec(&sdata->u.mesh.mpaths);
 	kfree(node->mpath);
 	kfree(node);
 }
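mesh_path_node_reclaim() runs as an RCU callback, so the actual kfree() of a node is deferred until no reader can still hold a pointer obtained from the hash table. The deleting side pairs with it roughly like this (sketch; the real call site lives elsewhere in this file):

	/* Sketch: RCU-safe removal followed by deferred reclaim. */
	hlist_del_rcu(&node->list);			/* unlink; readers may still see it */
	call_rcu(&node->rcu, mesh_path_node_reclaim);	/* free after the grace period */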
@@ -318,11 +429,11 @@ static void mesh_path_node_reclaim(struct rcu_head *rp)
  * mesh_path_del - delete a mesh path from the table
  *
  * @addr: dst address (ETH_ALEN length)
- * @dev: local interface
+ * @sdata: local subif
 *
 * Returns: 0 if successful
 */
-int mesh_path_del(u8 *addr, struct net_device *dev)
+int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
 {
 	struct mesh_path *mpath;
 	struct mpath_node *node;
@@ -332,13 +443,13 @@ int mesh_path_del(u8 *addr, struct net_device *dev)
 	int err = 0;
 
 	read_lock(&pathtbl_resize_lock);
-	hash_idx = mesh_table_hash(addr, dev, mesh_paths);
+	hash_idx = mesh_table_hash(addr, sdata, mesh_paths);
 	bucket = &mesh_paths->hash_buckets[hash_idx];
 
 	spin_lock(&mesh_paths->hashwlock[hash_idx]);
 	hlist_for_each_entry(node, n, bucket, list) {
 		mpath = node->mpath;
-		if (mpath->dev == dev &&
+		if (mpath->sdata == sdata &&
 		    memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
 			spin_lock_bh(&mpath->state_lock);
 			mpath->flags |= MESH_PATH_RESOLVING;
@@ -378,33 +489,33 @@ void mesh_path_tx_pending(struct mesh_path *mpath)
  * mesh_path_discard_frame - discard a frame whose path could not be resolved
  *
  * @skb: frame to discard
- * @dev: network device the frame was to be sent through
+ * @sdata: network subif the frame was to be sent through
 *
 * If the frame was being forwarded from another MP, a PERR frame will be sent
 * to the precursor.
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
-void mesh_path_discard_frame(struct sk_buff *skb, struct net_device *dev)
+void mesh_path_discard_frame(struct sk_buff *skb,
+			     struct ieee80211_sub_if_data *sdata)
 {
-	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 	struct mesh_path *mpath;
 	u32 dsn = 0;
 
-	if (memcmp(hdr->addr4, dev->dev_addr, ETH_ALEN) != 0) {
+	if (memcmp(hdr->addr4, sdata->dev->dev_addr, ETH_ALEN) != 0) {
 		u8 *ra, *da;
 
 		da = hdr->addr3;
 		ra = hdr->addr2;
-		mpath = mesh_path_lookup(da, dev);
+		mpath = mesh_path_lookup(da, sdata);
 		if (mpath)
 			dsn = ++mpath->dsn;
-		mesh_path_error_tx(skb->data, cpu_to_le32(dsn), ra, dev);
+		mesh_path_error_tx(skb->data, cpu_to_le32(dsn), ra, sdata);
 	}
 
 	kfree_skb(skb);
-	sdata->u.sta.mshstats.dropped_frames_no_route++;
+	sdata->u.mesh.mshstats.dropped_frames_no_route++;
 }
 
 /**
@@ -416,14 +527,11 @@ void mesh_path_discard_frame(struct sk_buff *skb, struct net_device *dev)
 */
 void mesh_path_flush_pending(struct mesh_path *mpath)
 {
-	struct ieee80211_sub_if_data *sdata;
 	struct sk_buff *skb;
 
-	sdata = IEEE80211_DEV_TO_SUB_IF(mpath->dev);
-
 	while ((skb = skb_dequeue(&mpath->frame_queue)) &&
 			(mpath->flags & MESH_PATH_ACTIVE))
-		mesh_path_discard_frame(skb, mpath->dev);
+		mesh_path_discard_frame(skb, mpath->sdata);
 }
 
 /**
@@ -472,7 +580,7 @@ static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
 	node = hlist_entry(p, struct mpath_node, list);
 	mpath = node->mpath;
 	new_node->mpath = mpath;
-	hash_idx = mesh_table_hash(mpath->dst, mpath->dev, newtbl);
+	hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
 	hlist_add_head(&new_node->list,
 			&newtbl->hash_buckets[hash_idx]);
 	return 0;
@@ -481,15 +589,25 @@ static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
 int mesh_pathtbl_init(void)
 {
 	mesh_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
+	if (!mesh_paths)
+		return -ENOMEM;
 	mesh_paths->free_node = &mesh_path_node_free;
 	mesh_paths->copy_node = &mesh_path_node_copy;
 	mesh_paths->mean_chain_len = MEAN_CHAIN_LEN;
-	if (!mesh_paths)
+
+	mpp_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
+	if (!mpp_paths) {
+		mesh_table_free(mesh_paths, true);
 		return -ENOMEM;
+	}
+	mpp_paths->free_node = &mesh_path_node_free;
+	mpp_paths->copy_node = &mesh_path_node_copy;
+	mpp_paths->mean_chain_len = MEAN_CHAIN_LEN;
+
 	return 0;
 }
 
-void mesh_path_expire(struct net_device *dev)
+void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
 {
 	struct mesh_path *mpath;
 	struct mpath_node *node;
@@ -498,7 +616,7 @@ void mesh_path_expire(struct net_device *dev)
 
 	read_lock(&pathtbl_resize_lock);
 	for_each_mesh_entry(mesh_paths, p, node, i) {
-		if (node->mpath->dev != dev)
+		if (node->mpath->sdata != sdata)
 			continue;
 		mpath = node->mpath;
 		spin_lock_bh(&mpath->state_lock);
@@ -507,7 +625,7 @@ void mesh_path_expire(struct net_device *dev)
 		    time_after(jiffies,
 			       mpath->exp_time + MESH_PATH_EXPIRE)) {
 			spin_unlock_bh(&mpath->state_lock);
-			mesh_path_del(mpath->dst, mpath->dev);
+			mesh_path_del(mpath->dst, mpath->sdata);
 		} else
 			spin_unlock_bh(&mpath->state_lock);
 	}
@@ -517,4 +635,5 @@ void mesh_path_expire(struct net_device *dev)
 void mesh_pathtbl_unregister(void)
 {
 	mesh_table_free(mesh_paths, true);
+	mesh_table_free(mpp_paths, true);
 }
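With both tables in play, mesh_pathtbl_init() either brings up mesh_paths and mpp_paths together or fails cleanly, and mesh_pathtbl_unregister() tears both down. A sketch of the expected lifetime pairing (the example_* names are placeholders, not mac80211 symbols):

	/* Sketch: the path tables live from subsystem init to exit. */
	static int __init example_init(void)
	{
		return mesh_pathtbl_init();	/* on failure nothing is left allocated */
	}

	static void __exit example_exit(void)
	{
		mesh_pathtbl_unregister();	/* frees mesh_paths and mpp_paths */
	}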