author		Javier Cardona <javier@cozybit.com>	2011-08-09 19:45:08 -0400
committer	John W. Linville <linville@tuxdriver.com>	2011-08-24 13:59:42 -0400
commit		5ee68e5b39de5cefecf147c58711f8ab01c21231 (patch)
tree		59a39c4dc5a38497aa786689552136d95c690ecf /net/mac80211/mesh_pathtbl.c
parent		00e3f25c8556384bfec2a168c41e885fa6a7748c (diff)
mac80211: mesh gate implementation
In this implementation, a mesh gate is a root node with a certain bit
set in its RANN flags. The mpath to this root node is marked as a path
to a gate, and added to our list of known gates for this if_mesh. Once a
path discovery process fails, we forward the unresolved frames to a
known gate. Thanks to Luis Rodriguez for refactoring and bug fix help.
Signed-off-by: Javier Cardona <javier@cozybit.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
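
For context, the intended flow is: when path discovery for a destination fails, the caller hands the frames queued on that mpath to mesh_path_send_to_gates() (added below) and discards them only if no gate is reachable. A minimal caller sketch, in the spirit of the discovery-timeout handling that lives outside this file; mesh_path_discovery_failed() is a hypothetical name, not part of this patch:

    /* Sketch only: the real call site lives outside mesh_pathtbl.c and
     * mesh_path_discovery_failed() is a hypothetical name for it. */
    static void mesh_path_discovery_failed(struct mesh_path *mpath)
    {
            struct sk_buff *skb;

            /* discovery gave up on this destination */
            mpath->flags &= ~MESH_PATH_RESOLVING;

            if (mesh_path_send_to_gates(mpath) == -EHOSTUNREACH) {
                    /* no usable gate: drop whatever is still queued */
                    while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
                            mesh_path_discard_frame(skb, mpath->sdata);
            }
    }
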
Diffstat (limited to 'net/mac80211/mesh_pathtbl.c')
-rw-r--r--	net/mac80211/mesh_pathtbl.c	284
1 file changed, 284 insertions(+), 0 deletions(-)
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index bcf7fee53b2c..75e4b6022b86 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -66,6 +66,8 @@ static inline struct mesh_table *resize_dereference_mpp_paths(void)
 			lockdep_is_held(&pathtbl_resize_lock));
 }
 
+static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath);
+
 /*
  * CAREFUL -- "tbl" must not be an expression,
  * in particular not an rcu_dereference(), since
@@ -109,6 +111,7 @@ static struct mesh_table *mesh_table_alloc(int size_order)
 			sizeof(newtbl->hash_rnd));
 	for (i = 0; i <= newtbl->hash_mask; i++)
 		spin_lock_init(&newtbl->hashwlock[i]);
+	spin_lock_init(&newtbl->gates_lock);
 
 	return newtbl;
 }
@@ -124,6 +127,7 @@ static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
 {
 	struct hlist_head *mesh_hash;
 	struct hlist_node *p, *q;
+	struct mpath_node *gate;
 	int i;
 
 	mesh_hash = tbl->hash_buckets;
@@ -135,6 +139,17 @@ static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
 		}
 		spin_unlock_bh(&tbl->hashwlock[i]);
 	}
+	if (free_leafs) {
+		spin_lock_bh(&tbl->gates_lock);
+		hlist_for_each_entry_safe(gate, p, q,
+					  tbl->known_gates, list) {
+			hlist_del(&gate->list);
+			kfree(gate);
+		}
+		kfree(tbl->known_gates);
+		spin_unlock_bh(&tbl->gates_lock);
+	}
+
 	__mesh_table_free(tbl);
 }
 
@@ -152,6 +167,7 @@ static int mesh_table_grow(struct mesh_table *oldtbl,
 	newtbl->free_node = oldtbl->free_node;
 	newtbl->mean_chain_len = oldtbl->mean_chain_len;
 	newtbl->copy_node = oldtbl->copy_node;
+	newtbl->known_gates = oldtbl->known_gates;
 	atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));
 
 	oldhash = oldtbl->hash_buckets;
@@ -211,6 +227,111 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
 	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
 }
 
+static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
+			     struct mesh_path *gate_mpath)
+{
+	struct ieee80211_hdr *hdr;
+	struct ieee80211s_hdr *mshdr;
+	int mesh_hdrlen, hdrlen;
+	char *next_hop;
+
+	hdr = (struct ieee80211_hdr *) skb->data;
+	hdrlen = ieee80211_hdrlen(hdr->frame_control);
+	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
+
+	if (!(mshdr->flags & MESH_FLAGS_AE)) {
+		/* size of the fixed part of the mesh header */
+		mesh_hdrlen = 6;
+
+		/* make room for the two extended addresses */
+		skb_push(skb, 2 * ETH_ALEN);
+		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);
+
+		hdr = (struct ieee80211_hdr *) skb->data;
+
+		/* we preserve the previous mesh header and only add
+		 * the new addresses */
+		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
+		mshdr->flags = MESH_FLAGS_AE_A5_A6;
+		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
+		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
+	}
+
+	/* update next hop */
+	hdr = (struct ieee80211_hdr *) skb->data;
+	rcu_read_lock();
+	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
+	memcpy(hdr->addr1, next_hop, ETH_ALEN);
+	rcu_read_unlock();
+	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
+}
+
+/**
+ *
+ * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
+ *
+ * This function is used to transfer or copy frames from an unresolved mpath to
+ * a gate mpath.  The function also adds the Address Extension field and
+ * updates the next hop.
+ *
+ * If a frame already has an Address Extension field, only the next hop and
+ * destination addresses are updated.
+ *
+ * The gate mpath must be an active mpath with a valid mpath->next_hop.
+ *
+ * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
+ * @from_mpath: The failed mpath
+ * @copy: When true, copy all the frames to the new mpath queue.  When false,
+ * move them.
+ */
+static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
+				    struct mesh_path *from_mpath,
+				    bool copy)
+{
+	struct sk_buff *skb, *cp_skb;
+	struct sk_buff_head gateq, failq;
+	unsigned long flags;
+	int num_skbs;
+
+	BUG_ON(gate_mpath == from_mpath);
+	BUG_ON(!gate_mpath->next_hop);
+
+	__skb_queue_head_init(&gateq);
+	__skb_queue_head_init(&failq);
+
+	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
+	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
+	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
+
+	num_skbs = skb_queue_len(&failq);
+
+	while (num_skbs--) {
+		skb = __skb_dequeue(&failq);
+		if (copy)
+			cp_skb = skb_copy(skb, GFP_ATOMIC);
+
+		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
+		__skb_queue_tail(&gateq, skb);
+
+		if (copy && cp_skb)
+			__skb_queue_tail(&failq, cp_skb);
+	}
+
+	spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags);
+	skb_queue_splice(&gateq, &gate_mpath->frame_queue);
+	mpath_dbg("Mpath queue for gate %pM has %d frames\n",
+		  gate_mpath->dst,
+		  skb_queue_len(&gate_mpath->frame_queue));
+	spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags);
+
+	if (!copy)
+		return;
+
+	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
+	skb_queue_splice(&failq, &from_mpath->frame_queue);
+	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
+}
+
 
 /**
  * mesh_path_lookup - look up a path in the mesh path table
@@ -310,6 +431,109 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data
 	return NULL;
 }
 
+static void mesh_gate_node_reclaim(struct rcu_head *rp)
+{
+	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
+	kfree(node);
+}
+
+/**
+ * mesh_gate_add - mark mpath as path to a mesh gate and add to known_gates
+ * @tbl: table which contains the known_gates list
+ * @mpath: mpath to known mesh gate
+ *
+ * Returns: 0 on success
+ *
+ */
+static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath)
+{
+	struct mpath_node *gate, *new_gate;
+	struct hlist_node *n;
+	int err;
+
+	rcu_read_lock();
+	tbl = rcu_dereference(tbl);
+
+	hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list)
+		if (gate->mpath == mpath) {
+			err = -EEXIST;
+			goto err_rcu;
+		}
+
+	new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
+	if (!new_gate) {
+		err = -ENOMEM;
+		goto err_rcu;
+	}
+
+	mpath->is_gate = true;
+	mpath->sdata->u.mesh.num_gates++;
+	new_gate->mpath = mpath;
+	spin_lock_bh(&tbl->gates_lock);
+	hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
+	spin_unlock_bh(&tbl->gates_lock);
+	rcu_read_unlock();
+	mpath_dbg("Mesh path (%s): Recorded new gate: %pM. %d known gates\n",
+		  mpath->sdata->name, mpath->dst,
+		  mpath->sdata->u.mesh.num_gates);
+	return 0;
+err_rcu:
+	rcu_read_unlock();
+	return err;
+}
+
+/**
+ * mesh_gate_del - remove a mesh gate from the list of known gates
+ * @tbl: table which holds our list of known gates
+ * @mpath: gate mpath
+ *
+ * Returns: 0 on success
+ *
+ * Locking: must be called inside an rcu_read_lock() section
+ */
+static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
+{
+	struct mpath_node *gate;
+	struct hlist_node *p, *q;
+
+	tbl = rcu_dereference(tbl);
+
+	hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list)
+		if (gate->mpath == mpath) {
+			spin_lock_bh(&tbl->gates_lock);
+			hlist_del_rcu(&gate->list);
+			call_rcu(&gate->rcu, mesh_gate_node_reclaim);
+			spin_unlock_bh(&tbl->gates_lock);
+			mpath->sdata->u.mesh.num_gates--;
+			mpath->is_gate = false;
+			mpath_dbg("Mesh path (%s): Deleted gate: %pM. "
+				  "%d known gates\n", mpath->sdata->name,
+				  mpath->dst, mpath->sdata->u.mesh.num_gates);
+			break;
+		}
+
+	return 0;
+}
+
+/**
+ *
+ * mesh_path_add_gate - record the given mpath as a path to a mesh gate
+ * @mpath: gate path to add to the table
+ */
+int mesh_path_add_gate(struct mesh_path *mpath)
+{
+	return mesh_gate_add(mesh_paths, mpath);
+}
+
+/**
+ * mesh_gate_num - number of gates known to this interface
+ * @sdata: subif data
+ */
+int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
+{
+	return sdata->u.mesh.num_gates;
+}
+
 /**
  * mesh_path_add - allocate and add a new path to the mesh path table
  * @addr: destination address of the path (ETH_ALEN length)
313 | /** | 537 | /** |
314 | * mesh_path_add - allocate and add a new path to the mesh path table | 538 | * mesh_path_add - allocate and add a new path to the mesh path table |
315 | * @addr: destination address of the path (ETH_ALEN length) | 539 | * @addr: destination address of the path (ETH_ALEN length) |
@@ -655,6 +879,8 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
 		if (mpath->sdata == sdata &&
 		    memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
 			spin_lock_bh(&mpath->state_lock);
+			if (mpath->is_gate)
+				mesh_gate_del(tbl, mpath);
 			mpath->flags |= MESH_PATH_RESOLVING;
 			hlist_del_rcu(&node->list);
 			call_rcu(&node->rcu, mesh_path_node_reclaim);
@@ -688,6 +914,58 @@ void mesh_path_tx_pending(struct mesh_path *mpath)
 }
 
 /**
+ * mesh_path_send_to_gates - sends pending frames to all known mesh gates
+ *
+ * @mpath: mesh path whose queue will be emptied
+ *
+ * If there is only one gate, the frames are transferred from the failed mpath
+ * queue to that gate's queue.  If there is more than one gate, the frames
+ * are copied from each gate to the next.  After frames are copied, the
+ * mpath queues are emptied onto the transmission queue.
+ */
+int mesh_path_send_to_gates(struct mesh_path *mpath)
+{
+	struct ieee80211_sub_if_data *sdata = mpath->sdata;
+	struct hlist_node *n;
+	struct mesh_table *tbl;
+	struct mesh_path *from_mpath = mpath;
+	struct mpath_node *gate = NULL;
+	bool copy = false;
+	struct hlist_head *known_gates;
+
+	rcu_read_lock();
+	tbl = rcu_dereference(mesh_paths);
+	known_gates = tbl->known_gates;
+	rcu_read_unlock();
+
+	if (!known_gates)
+		return -EHOSTUNREACH;
+
+	hlist_for_each_entry_rcu(gate, n, known_gates, list) {
+		if (gate->mpath->sdata != sdata)
+			continue;
+
+		if (gate->mpath->flags & MESH_PATH_ACTIVE) {
+			mpath_dbg("Forwarding to %pM\n", gate->mpath->dst);
+			mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
+			from_mpath = gate->mpath;
+			copy = true;
+		} else {
+			mpath_dbg("Not forwarding %p\n", gate->mpath);
+			mpath_dbg("flags %x\n", gate->mpath->flags);
+		}
+	}
+
+	hlist_for_each_entry_rcu(gate, n, known_gates, list)
+		if (gate->mpath->sdata == sdata) {
+			mpath_dbg("Sending to %pM\n", gate->mpath->dst);
+			mesh_path_tx_pending(gate->mpath);
+		}
+
+	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
+}
+
+/**
  * mesh_path_discard_frame - discard a frame whose path could not be resolved
  *
  * @skb: frame to discard
@@ -804,6 +1082,9 @@ int mesh_pathtbl_init(void)
 	tbl_path->free_node = &mesh_path_node_free;
 	tbl_path->copy_node = &mesh_path_node_copy;
 	tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
+	tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
+	INIT_HLIST_HEAD(tbl_path->known_gates);
+
 
 	tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
 	if (!tbl_mpp) {
@@ -813,6 +1094,9 @@ int mesh_pathtbl_init(void)
 	tbl_mpp->free_node = &mesh_path_node_free;
 	tbl_mpp->copy_node = &mesh_path_node_copy;
 	tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
+	/* XXX: not needed */
+	tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
+	INIT_HLIST_HEAD(tbl_mpp->known_gates);
 
 	/* Need no locking since this is during init */
 	RCU_INIT_POINTER(mesh_paths, tbl_path);
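
For completeness: the marking side described in the commit message ("a root node with a certain bit set in its RANN flags") lives in the RANN handler in mesh_hwmp.c, not in this file. A sketch of what that handler is expected to do; the RANN_FLAG_IS_GATE name and the helper below are assumptions, only mesh_path_add_gate() comes from this patch:

    /* Sketch: when a RANN advertises the gate bit, mark the mpath to
     * that root as a gate.  RANN_FLAG_IS_GATE and this helper are
     * assumed; only mesh_path_add_gate() is defined by this patch. */
    static void rann_record_gate(struct mesh_path *mpath, u8 rann_flags)
    {
            if (rann_flags & RANN_FLAG_IS_GATE)
                    mesh_path_add_gate(mpath);      /* adds to known_gates */
    }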