Diffstat (limited to 'net/mac80211/mesh_pathtbl.c')
 -rw-r--r--  net/mac80211/mesh_pathtbl.c | 516
 1 file changed, 516 insertions, 0 deletions
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
new file mode 100644
index 000000000000..5845dc21ce85
--- /dev/null
+++ b/net/mac80211/mesh_pathtbl.c
@@ -0,0 +1,516 @@
/*
 * Copyright (c) 2008 open80211s Ltd.
 * Author: Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "mesh.h"

/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER	2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN		2

#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
				time_after(jiffies, mpath->exp_time) && \
				!(mpath->flags & MESH_PATH_FIXED))

struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, useful when resizing
	 */
	struct mesh_path *mpath;
};

static struct mesh_table *mesh_paths;

/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. When reading the table (i.e. doing lookups) we are well protected
 * by RCU
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);

/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	rcu_assign_pointer(mpath->next_hop, sta);
}


/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @dev: local interface
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within an RCU read-side section.
 */
struct mesh_path *mesh_path_lookup(u8 *dst, struct net_device *dev)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mesh_table *tbl;
	struct mpath_node *node;

	tbl = rcu_dereference(mesh_paths);

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, dev, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->dev == dev &&
				memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
			if (MPATH_EXPIRED(mpath)) {
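				/*
				 * Recheck with state_lock held; a
				 * concurrent path may have cleared the
				 * flag since the unlocked test above.
				 */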
				spin_lock_bh(&mpath->state_lock);
				if (MPATH_EXPIRED(mpath))
					mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}

/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @dev: local interface, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within an RCU read-side section.
 */
struct mesh_path *mesh_path_lookup_by_idx(int idx, struct net_device *dev)
{
	struct mpath_node *node;
	struct hlist_node *p;
	int i;
	int j = 0;

	for_each_mesh_entry(mesh_paths, p, node, i) {
		if (dev && node->mpath->dev != dev)
			continue;
		if (j++ == idx) {
			if (MPATH_EXPIRED(node->mpath)) {
				spin_lock_bh(&node->mpath->state_lock);
				if (MPATH_EXPIRED(node->mpath))
					node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	}

	return NULL;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @dev: local interface
 *
 * Returns: 0 on success
 *
 * State: the initial state of the new path is set to 0
 */
int mesh_path_add(u8 *dst, struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (memcmp(dst, dev->dev_addr, ETH_ALEN) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

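	/*
	 * Each interface may hold at most MESH_MAX_MPATHS paths; reserve a
	 * slot up front and release it on any failure below.
	 */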
	if (atomic_add_unless(&sdata->u.sta.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return -ENOSPC;

	read_lock(&pathtbl_resize_lock);

	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL);
	if (!new_mpath) {
		atomic_dec(&sdata->u.sta.mpaths);
		err = -ENOMEM;
		goto endadd2;
	}
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	new_mpath->dev = dev;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
	if (!new_node) {
		kfree(new_mpath);
		atomic_dec(&sdata->u.sta.mpaths);
		err = -ENOMEM;
		goto endadd2;
	}
	new_node->mpath = new_mpath;
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	hash_idx = mesh_table_hash(dst, dev, mesh_paths);
	bucket = &mesh_paths->hash_buckets[hash_idx];

	spin_lock(&mesh_paths->hashwlock[hash_idx]);

	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->dev == dev && memcmp(dst, mpath->dst, ETH_ALEN)
				== 0) {
			err = -EEXIST;
			atomic_dec(&sdata->u.sta.mpaths);
			kfree(new_node);
			kfree(new_mpath);
			goto endadd;
		}
	}

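	/*
	 * Publish the new node: RCU lookups may see it as soon as it is
	 * linked in. If the mean chain length now exceeds the threshold,
	 * request a table grow once the locks are dropped.
	 */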
	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&mesh_paths->entries) >=
		mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1))
		grow = 1;

endadd:
	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
endadd2:
	read_unlock(&pathtbl_resize_lock);
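	/*
	 * Grow the table with pathtbl_resize_lock held as writer, so that
	 * concurrent add/del (the readers of that lock) are excluded while
	 * entries move between tables.
	 */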
	if (!err && grow) {
		struct mesh_table *oldtbl, *newtbl;

		write_lock(&pathtbl_resize_lock);
		oldtbl = mesh_paths;
		newtbl = mesh_table_grow(mesh_paths);
		if (!newtbl) {
			write_unlock(&pathtbl_resize_lock);
			return -ENOMEM;
		}
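		/*
		 * Swap in the new table, then wait a grace period so that
		 * lookups still traversing the old one finish before it is
		 * freed (without its leaf mesh_path structures, which both
		 * tables share).
		 */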
		rcu_assign_pointer(mesh_paths, newtbl);
		synchronize_rcu();
		mesh_table_free(oldtbl, false);
		write_unlock(&pathtbl_resize_lock);
	}
	return err;
}


/**
 * mesh_plink_broken - deactivates paths and sends PERR when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	struct net_device *dev = sta->sdata->dev;
	int i;

	rcu_read_lock();
	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		spin_lock_bh(&mpath->state_lock);
		if (mpath->next_hop == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			mpath->flags &= ~MESH_PATH_ACTIVE;
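			/*
			 * Bump the destination sequence number so the PERR
			 * supersedes any stale routing information for this
			 * destination.
			 */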
			++mpath->dsn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(mpath->dst,
					cpu_to_le32(mpath->dsn),
					dev->broadcast, dev);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(mesh_plink_broken);

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in an RCU read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		if (mpath->next_hop == sta)
			mesh_path_del(mpath->dst, mpath->dev);
	}
}

void mesh_path_flush(struct net_device *dev)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		if (mpath->dev == dev)
			mesh_path_del(mpath->dst, mpath->dev);
	}
}

static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata =
		IEEE80211_DEV_TO_SUB_IF(node->mpath->dev);

	del_timer_sync(&node->mpath->timer);
	atomic_dec(&sdata->u.sta.mpaths);
	kfree(node->mpath);
	kfree(node);
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @dev: local interface
 *
 * Returns: 0 if successful
 */
int mesh_path_del(u8 *addr, struct net_device *dev)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int hash_idx;
	int err = 0;

	read_lock(&pathtbl_resize_lock);
	hash_idx = mesh_table_hash(addr, dev, mesh_paths);
	bucket = &mesh_paths->hash_buckets[hash_idx];

	spin_lock(&mesh_paths->hashwlock[hash_idx]);
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->dev == dev &&
				memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
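			/*
			 * Mark the path as RESOLVING so nobody trusts it as
			 * an active route while the RCU callback waits to
			 * reclaim the node.
			 */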
			spin_lock_bh(&mpath->state_lock);
			mpath->flags |= MESH_PATH_RESOLVING;
			hlist_del_rcu(&node->list);
			call_rcu(&node->rcu, mesh_path_node_reclaim);
			atomic_dec(&mesh_paths->entries);
			spin_unlock_bh(&mpath->state_lock);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
	read_unlock(&pathtbl_resize_lock);
	return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	/* Test the flag before dequeueing, so a frame is never pulled off
	 * the queue and then leaked once the path goes inactive. */
	while ((mpath->flags & MESH_PATH_ACTIVE) &&
			(skb = skb_dequeue(&mpath->frame_queue)))
		dev_queue_xmit(skb);
}

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @dev: network device the frame was to be sent through
 *
 * If the frame was being forwarded from another MP, a PERR frame will be sent
 * to the precursor.
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_discard_frame(struct sk_buff *skb, struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct mesh_path *mpath;
	u32 dsn = 0;

	if (skb->pkt_type == PACKET_OTHERHOST) {
		struct ieee80211s_hdr *prev_meshhdr;
		int mshhdrlen;
		u8 *ra, *da;

		prev_meshhdr = ((struct ieee80211s_hdr *)skb->cb);
		mshhdrlen = ieee80211_get_mesh_hdrlen(prev_meshhdr);
		da = skb->data;
		ra = MESH_PREQ(skb);
		mpath = mesh_path_lookup(da, dev);
		if (mpath)
			dsn = ++mpath->dsn;
		mesh_path_error_tx(skb->data, cpu_to_le32(dsn), ra, dev);
	}

	kfree_skb(skb);
	sdata->u.sta.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata;
	struct sk_buff *skb;

	sdata = IEEE80211_DEV_TO_SUB_IF(mpath->dev);

	/* Discard every queued frame unconditionally; gating on
	 * MESH_PATH_ACTIVE here could leak an already dequeued skb. */
	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(skb, mpath->dev);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function takes mpath->state_lock itself, so it must NOT be
 * held by the caller.
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->dsn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	mesh_path_tx_pending(mpath);
}

static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mesh_path *mpath;
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);

	mpath = node->mpath;
	hlist_del_rcu(p);
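	/* Wait out any readers still walking the list before freeing. */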
	synchronize_rcu();
	if (free_leafs)
		kfree(mpath);
	kfree(node);
}

static void mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
	struct mesh_path *mpath;
	struct mpath_node *node, *new_node;
	u32 hash_idx;

	node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
	if (!new_node)
		return;		/* drop the path rather than oops on NULL */
	new_node->mpath = mpath;
	hash_idx = mesh_table_hash(mpath->dst, mpath->dev, newtbl);
	hlist_add_head(&new_node->list,
			&newtbl->hash_buckets[hash_idx]);
}

int mesh_pathtbl_init(void)
{
	mesh_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!mesh_paths)
		return -ENOMEM;
	mesh_paths->free_node = &mesh_path_node_free;
	mesh_paths->copy_node = &mesh_path_node_copy;
	mesh_paths->mean_chain_len = MEAN_CHAIN_LEN;
	return 0;
}

void mesh_path_expire(struct net_device *dev)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	read_lock(&pathtbl_resize_lock);
	for_each_mesh_entry(mesh_paths, p, node, i) {
		if (node->mpath->dev != dev)
			continue;
		mpath = node->mpath;
		spin_lock_bh(&mpath->state_lock);
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		    time_after(jiffies,
			       mpath->exp_time + MESH_PATH_EXPIRE)) {
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_del(mpath->dst, mpath->dev);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	read_unlock(&pathtbl_resize_lock);
}

void mesh_pathtbl_unregister(void)
{
	mesh_table_free(mesh_paths, true);
}