Diffstat (limited to 'net/mac80211/mesh_pathtbl.c')
 net/mac80211/mesh_pathtbl.c | 481 ++++++++++++++++++++++++++++++++++++--------
 1 file changed, 403 insertions(+), 78 deletions(-)
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 068ee6518254..7f54c5042235 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -14,9 +14,16 @@
 #include <linux/spinlock.h>
 #include <linux/string.h>
 #include <net/mac80211.h>
+#include "wme.h"
 #include "ieee80211_i.h"
 #include "mesh.h"
 
+#ifdef CONFIG_MAC80211_VERBOSE_MPATH_DEBUG
+#define mpath_dbg(fmt, args...)	printk(KERN_DEBUG fmt, ##args)
+#else
+#define mpath_dbg(fmt, args...)	do { (void)(0); } while (0)
+#endif
+
 /* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
 #define INIT_PATHS_SIZE_ORDER	2
 
@@ -42,8 +49,10 @@ static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */
 int mesh_paths_generation;
 
 /* This lock will have the grow table function as writer and add / delete nodes
- * as readers. When reading the table (i.e. doing lookups) we are well protected
- * by RCU
+ * as readers. RCU provides sufficient protection only when reading the table
+ * (i.e. doing lookups). Adding or removing nodes requires we take
+ * the read lock or we risk operating on an old table. The write lock is only
+ * needed when modifying the number of buckets in a table.
  */
 static DEFINE_RWLOCK(pathtbl_resize_lock);
 
@@ -60,6 +69,8 @@ static inline struct mesh_table *resize_dereference_mpp_paths(void)
 			lockdep_is_held(&pathtbl_resize_lock));
 }
 
+static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath);
+
 /*
  * CAREFUL -- "tbl" must not be an expression,
  * in particular not an rcu_dereference(), since
@@ -103,6 +114,7 @@ static struct mesh_table *mesh_table_alloc(int size_order)
 		       sizeof(newtbl->hash_rnd));
 	for (i = 0; i <= newtbl->hash_mask; i++)
 		spin_lock_init(&newtbl->hashwlock[i]);
+	spin_lock_init(&newtbl->gates_lock);
 
 	return newtbl;
 }
@@ -118,6 +130,7 @@ static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
 {
 	struct hlist_head *mesh_hash;
 	struct hlist_node *p, *q;
+	struct mpath_node *gate;
 	int i;
 
 	mesh_hash = tbl->hash_buckets;
@@ -129,6 +142,17 @@ static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
 		}
 		spin_unlock_bh(&tbl->hashwlock[i]);
 	}
+	if (free_leafs) {
+		spin_lock_bh(&tbl->gates_lock);
+		hlist_for_each_entry_safe(gate, p, q,
+					  tbl->known_gates, list) {
+			hlist_del(&gate->list);
+			kfree(gate);
+		}
+		kfree(tbl->known_gates);
+		spin_unlock_bh(&tbl->gates_lock);
+	}
+
 	__mesh_table_free(tbl);
 }
 
@@ -146,6 +170,7 @@ static int mesh_table_grow(struct mesh_table *oldtbl,
 	newtbl->free_node = oldtbl->free_node;
 	newtbl->mean_chain_len = oldtbl->mean_chain_len;
 	newtbl->copy_node = oldtbl->copy_node;
+	newtbl->known_gates = oldtbl->known_gates;
 	atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));
 
 	oldhash = oldtbl->hash_buckets;
@@ -188,6 +213,7 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
 	struct ieee80211_hdr *hdr;
 	struct sk_buff_head tmpq;
 	unsigned long flags;
+	struct ieee80211_sub_if_data *sdata = mpath->sdata;
 
 	rcu_assign_pointer(mpath->next_hop, sta);
 
@@ -198,6 +224,8 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
 	while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
 		hdr = (struct ieee80211_hdr *) skb->data;
 		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
+		skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, skb));
+		ieee80211_set_qos_hdr(sdata, skb);
 		__skb_queue_tail(&tmpq, skb);
 	}
 
@@ -205,62 +233,128 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
 	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
 }
 
+static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
+			     struct mesh_path *gate_mpath)
+{
+	struct ieee80211_hdr *hdr;
+	struct ieee80211s_hdr *mshdr;
+	int mesh_hdrlen, hdrlen;
+	char *next_hop;
+
+	hdr = (struct ieee80211_hdr *) skb->data;
+	hdrlen = ieee80211_hdrlen(hdr->frame_control);
+	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
+
+	if (!(mshdr->flags & MESH_FLAGS_AE)) {
+		/* size of the fixed part of the mesh header */
+		mesh_hdrlen = 6;
+
+		/* make room for the two extended addresses */
+		skb_push(skb, 2 * ETH_ALEN);
+		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);
+
+		hdr = (struct ieee80211_hdr *) skb->data;
+
+		/* we preserve the previous mesh header and only add
+		 * the new addresses */
+		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
+		mshdr->flags = MESH_FLAGS_AE_A5_A6;
+		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
+		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
+	}
+
+	/* update next hop */
+	hdr = (struct ieee80211_hdr *) skb->data;
+	rcu_read_lock();
+	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
+	memcpy(hdr->addr1, next_hop, ETH_ALEN);
+	rcu_read_unlock();
+	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
+}
 
 /**
- * mesh_path_lookup - look up a path in the mesh path table
- * @dst: hardware address (ETH_ALEN length) of destination
- * @sdata: local subif
  *
- * Returns: pointer to the mesh path structure, or NULL if not found
+ * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
  *
- * Locking: must be called within a read rcu section.
+ * This function is used to transfer or copy frames from an unresolved mpath to
+ * a gate mpath. The function also adds the Address Extension field and
+ * updates the next hop.
+ *
+ * If a frame already has an Address Extension field, only the next hop and
+ * destination addresses are updated.
+ *
+ * The gate mpath must be an active mpath with a valid mpath->next_hop.
+ *
+ * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
+ * @from_mpath: The failed mpath
+ * @copy: When true, copy all the frames to the new mpath queue. When false,
+ * move them.
  */
-struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
+static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
+				    struct mesh_path *from_mpath,
+				    bool copy)
 {
-	struct mesh_path *mpath;
-	struct hlist_node *n;
-	struct hlist_head *bucket;
-	struct mesh_table *tbl;
-	struct mpath_node *node;
+	struct sk_buff *skb, *cp_skb = NULL;
+	struct sk_buff_head gateq, failq;
+	unsigned long flags;
+	int num_skbs;
 
-	tbl = rcu_dereference(mesh_paths);
+	BUG_ON(gate_mpath == from_mpath);
+	BUG_ON(!gate_mpath->next_hop);
 
-	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
-	hlist_for_each_entry_rcu(node, n, bucket, list) {
-		mpath = node->mpath;
-		if (mpath->sdata == sdata &&
-		    memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
-			if (MPATH_EXPIRED(mpath)) {
-				spin_lock_bh(&mpath->state_lock);
-				if (MPATH_EXPIRED(mpath))
-					mpath->flags &= ~MESH_PATH_ACTIVE;
-				spin_unlock_bh(&mpath->state_lock);
-			}
-			return mpath;
+	__skb_queue_head_init(&gateq);
+	__skb_queue_head_init(&failq);
+
+	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
+	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
+	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
+
+	num_skbs = skb_queue_len(&failq);
+
+	while (num_skbs--) {
+		skb = __skb_dequeue(&failq);
+		if (copy) {
+			cp_skb = skb_copy(skb, GFP_ATOMIC);
+			if (cp_skb)
+				__skb_queue_tail(&failq, cp_skb);
 		}
+
+		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
+		__skb_queue_tail(&gateq, skb);
 	}
-	return NULL;
+
+	spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags);
+	skb_queue_splice(&gateq, &gate_mpath->frame_queue);
+	mpath_dbg("Mpath queue for gate %pM has %d frames\n",
+		  gate_mpath->dst,
+		  skb_queue_len(&gate_mpath->frame_queue));
+	spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags);
+
+	if (!copy)
+		return;
+
+	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
+	skb_queue_splice(&failq, &from_mpath->frame_queue);
+	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
 }
 
-struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
+
+static struct mesh_path *path_lookup(struct mesh_table *tbl, u8 *dst,
+				     struct ieee80211_sub_if_data *sdata)
 {
 	struct mesh_path *mpath;
 	struct hlist_node *n;
 	struct hlist_head *bucket;
-	struct mesh_table *tbl;
 	struct mpath_node *node;
 
-	tbl = rcu_dereference(mpp_paths);
-
 	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
 	hlist_for_each_entry_rcu(node, n, bucket, list) {
 		mpath = node->mpath;
 		if (mpath->sdata == sdata &&
 		    memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
 			if (MPATH_EXPIRED(mpath)) {
 				spin_lock_bh(&mpath->state_lock);
-				if (MPATH_EXPIRED(mpath))
-					mpath->flags &= ~MESH_PATH_ACTIVE;
+				mpath->flags &= ~MESH_PATH_ACTIVE;
 				spin_unlock_bh(&mpath->state_lock);
 			}
 			return mpath;
@@ -269,6 +363,25 @@ struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
 	return NULL;
 }
 
+/**
+ * mesh_path_lookup - look up a path in the mesh path table
+ * @dst: hardware address (ETH_ALEN length) of destination
+ * @sdata: local subif
+ *
+ * Returns: pointer to the mesh path structure, or NULL if not found
+ *
+ * Locking: must be called within a read rcu section.
+ */
+struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
+{
+	return path_lookup(rcu_dereference(mesh_paths), dst, sdata);
+}
+
+struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
+{
+	return path_lookup(rcu_dereference(mpp_paths), dst, sdata);
+}
+
 
 /**
  * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
@@ -293,8 +406,7 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data
 		if (j++ == idx) {
 			if (MPATH_EXPIRED(node->mpath)) {
 				spin_lock_bh(&node->mpath->state_lock);
-				if (MPATH_EXPIRED(node->mpath))
-					node->mpath->flags &= ~MESH_PATH_ACTIVE;
+				node->mpath->flags &= ~MESH_PATH_ACTIVE;
 				spin_unlock_bh(&node->mpath->state_lock);
 			}
 			return node->mpath;
@@ -304,6 +416,109 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data
 	}
 	return NULL;
 }
 
+static void mesh_gate_node_reclaim(struct rcu_head *rp)
+{
+	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
+	kfree(node);
+}
+
+/**
+ * mesh_gate_add - mark mpath as path to a mesh gate and add to known_gates
+ * @tbl: table which contains known_gates list
+ * @mpath: mpath to known mesh gate
+ *
+ * Returns: 0 on success
+ *
+ */
+static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath)
+{
+	struct mpath_node *gate, *new_gate;
+	struct hlist_node *n;
+	int err;
+
+	rcu_read_lock();
+	tbl = rcu_dereference(tbl);
+
+	hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list)
+		if (gate->mpath == mpath) {
+			err = -EEXIST;
+			goto err_rcu;
+		}
+
+	new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
+	if (!new_gate) {
+		err = -ENOMEM;
+		goto err_rcu;
+	}
+
+	mpath->is_gate = true;
+	mpath->sdata->u.mesh.num_gates++;
+	new_gate->mpath = mpath;
+	spin_lock_bh(&tbl->gates_lock);
+	hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
+	spin_unlock_bh(&tbl->gates_lock);
+	rcu_read_unlock();
+	mpath_dbg("Mesh path (%s): Recorded new gate: %pM. %d known gates\n",
+		  mpath->sdata->name, mpath->dst,
+		  mpath->sdata->u.mesh.num_gates);
+	return 0;
+err_rcu:
+	rcu_read_unlock();
+	return err;
+}
+
+/**
+ * mesh_gate_del - remove a mesh gate from the list of known gates
+ * @tbl: table which holds our list of known gates
+ * @mpath: gate mpath
+ *
+ * Returns: 0 on success
+ *
+ * Locking: must be called inside rcu_read_lock() section
+ */
+static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
+{
+	struct mpath_node *gate;
+	struct hlist_node *p, *q;
+
+	tbl = rcu_dereference(tbl);
+
+	hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list)
+		if (gate->mpath == mpath) {
+			spin_lock_bh(&tbl->gates_lock);
+			hlist_del_rcu(&gate->list);
+			call_rcu(&gate->rcu, mesh_gate_node_reclaim);
+			spin_unlock_bh(&tbl->gates_lock);
+			mpath->sdata->u.mesh.num_gates--;
+			mpath->is_gate = false;
+			mpath_dbg("Mesh path (%s): Deleted gate: %pM. "
+				  "%d known gates\n", mpath->sdata->name,
+				  mpath->dst, mpath->sdata->u.mesh.num_gates);
+			break;
+		}
+
+	return 0;
+}
+
+/**
+ *
+ * mesh_path_add_gate - add the given mpath as a path to a mesh gate
+ * @mpath: gate path to add to table
+ */
+int mesh_path_add_gate(struct mesh_path *mpath)
+{
+	return mesh_gate_add(mesh_paths, mpath);
+}
+
+/**
+ * mesh_gate_num - number of gates known to this interface
+ * @sdata: subif data
+ */
+int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
+{
+	return sdata->u.mesh.num_gates;
+}
+
 /**
  * mesh_path_add - allocate and add a new path to the mesh path table
  * @addr: destination address of the path (ETH_ALEN length)
@@ -481,6 +696,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
 	new_mpath->flags = 0;
 	skb_queue_head_init(&new_mpath->frame_queue);
 	new_node->mpath = new_mpath;
+	init_timer(&new_mpath->timer);
 	new_mpath->exp_time = jiffies;
 	spin_lock_init(&new_mpath->state_lock);
 
@@ -539,28 +755,53 @@ void mesh_plink_broken(struct sta_info *sta)
 	struct hlist_node *p;
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
 	int i;
+	__le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE);
 
 	rcu_read_lock();
 	tbl = rcu_dereference(mesh_paths);
 	for_each_mesh_entry(tbl, p, node, i) {
 		mpath = node->mpath;
-		spin_lock_bh(&mpath->state_lock);
 		if (rcu_dereference(mpath->next_hop) == sta &&
 		    mpath->flags & MESH_PATH_ACTIVE &&
 		    !(mpath->flags & MESH_PATH_FIXED)) {
+			spin_lock_bh(&mpath->state_lock);
 			mpath->flags &= ~MESH_PATH_ACTIVE;
 			++mpath->sn;
 			spin_unlock_bh(&mpath->state_lock);
 			mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl,
 					   mpath->dst, cpu_to_le32(mpath->sn),
-					   cpu_to_le16(PERR_RCODE_DEST_UNREACH),
-					   bcast, sdata);
-		} else
-			spin_unlock_bh(&mpath->state_lock);
+					   reason, bcast, sdata);
+		}
 	}
 	rcu_read_unlock();
 }
 
+static void mesh_path_node_reclaim(struct rcu_head *rp)
+{
+	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
+	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
+
+	del_timer_sync(&node->mpath->timer);
+	atomic_dec(&sdata->u.mesh.mpaths);
+	kfree(node->mpath);
+	kfree(node);
+}
+
+/* needs to be called with the corresponding hashwlock taken */
+static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
+{
+	struct mesh_path *mpath;
+	mpath = node->mpath;
+	spin_lock(&mpath->state_lock);
+	mpath->flags |= MESH_PATH_RESOLVING;
+	if (mpath->is_gate)
+		mesh_gate_del(tbl, mpath);
+	hlist_del_rcu(&node->list);
+	call_rcu(&node->rcu, mesh_path_node_reclaim);
+	spin_unlock(&mpath->state_lock);
+	atomic_dec(&tbl->entries);
+}
+
 /**
  * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
  *
@@ -581,42 +822,59 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
 	int i;
 
 	rcu_read_lock();
-	tbl = rcu_dereference(mesh_paths);
+	read_lock_bh(&pathtbl_resize_lock);
+	tbl = resize_dereference_mesh_paths();
 	for_each_mesh_entry(tbl, p, node, i) {
 		mpath = node->mpath;
-		if (rcu_dereference(mpath->next_hop) == sta)
-			mesh_path_del(mpath->dst, mpath->sdata);
+		if (rcu_dereference(mpath->next_hop) == sta) {
+			spin_lock_bh(&tbl->hashwlock[i]);
+			__mesh_path_del(tbl, node);
+			spin_unlock_bh(&tbl->hashwlock[i]);
+		}
 	}
+	read_unlock_bh(&pathtbl_resize_lock);
 	rcu_read_unlock();
 }
 
-void mesh_path_flush(struct ieee80211_sub_if_data *sdata)
+static void table_flush_by_iface(struct mesh_table *tbl,
+				 struct ieee80211_sub_if_data *sdata)
 {
-	struct mesh_table *tbl;
 	struct mesh_path *mpath;
 	struct mpath_node *node;
 	struct hlist_node *p;
 	int i;
 
-	rcu_read_lock();
-	tbl = rcu_dereference(mesh_paths);
+	WARN_ON(!rcu_read_lock_held());
 	for_each_mesh_entry(tbl, p, node, i) {
 		mpath = node->mpath;
-		if (mpath->sdata == sdata)
-			mesh_path_del(mpath->dst, mpath->sdata);
+		if (mpath->sdata != sdata)
+			continue;
+		spin_lock_bh(&tbl->hashwlock[i]);
+		__mesh_path_del(tbl, node);
+		spin_unlock_bh(&tbl->hashwlock[i]);
 	}
-	rcu_read_unlock();
 }
 
-static void mesh_path_node_reclaim(struct rcu_head *rp)
+/**
+ * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
+ *
+ * This function deletes both mesh paths as well as mesh portal paths.
+ *
+ * @sdata - interface data to match
+ *
+ */
+void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
 {
-	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
-	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
+	struct mesh_table *tbl;
 
-	del_timer_sync(&node->mpath->timer);
-	atomic_dec(&sdata->u.mesh.mpaths);
-	kfree(node->mpath);
-	kfree(node);
+	rcu_read_lock();
+	read_lock_bh(&pathtbl_resize_lock);
+	tbl = resize_dereference_mesh_paths();
+	table_flush_by_iface(tbl, sdata);
+	tbl = resize_dereference_mpp_paths();
+	table_flush_by_iface(tbl, sdata);
+	read_unlock_bh(&pathtbl_resize_lock);
+	rcu_read_unlock();
 }
 
 /**
@@ -647,12 +905,7 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
 		mpath = node->mpath;
 		if (mpath->sdata == sdata &&
 		    memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
-			spin_lock(&mpath->state_lock);
-			mpath->flags |= MESH_PATH_RESOLVING;
-			hlist_del_rcu(&node->list);
-			call_rcu(&node->rcu, mesh_path_node_reclaim);
-			atomic_dec(&tbl->entries);
-			spin_unlock(&mpath->state_lock);
+			__mesh_path_del(tbl, node);
 			goto enddel;
 		}
 	}
@@ -681,6 +934,58 @@ void mesh_path_tx_pending(struct mesh_path *mpath)
 }
 
 /**
+ * mesh_path_send_to_gates - sends pending frames to all known mesh gates
+ *
+ * @mpath: mesh path whose queue will be emptied
+ *
+ * If there is only one gate, the frames are transferred from the failed mpath
+ * queue to that gate's queue. If there is more than one gate, the frames
+ * are copied from each gate to the next. After frames are copied, the
+ * mpath queues are emptied onto the transmission queue.
+ */
+int mesh_path_send_to_gates(struct mesh_path *mpath)
+{
+	struct ieee80211_sub_if_data *sdata = mpath->sdata;
+	struct hlist_node *n;
+	struct mesh_table *tbl;
+	struct mesh_path *from_mpath = mpath;
+	struct mpath_node *gate = NULL;
+	bool copy = false;
+	struct hlist_head *known_gates;
+
+	rcu_read_lock();
+	tbl = rcu_dereference(mesh_paths);
+	known_gates = tbl->known_gates;
+	rcu_read_unlock();
+
+	if (!known_gates)
+		return -EHOSTUNREACH;
+
+	hlist_for_each_entry_rcu(gate, n, known_gates, list) {
+		if (gate->mpath->sdata != sdata)
+			continue;
+
+		if (gate->mpath->flags & MESH_PATH_ACTIVE) {
+			mpath_dbg("Forwarding to %pM\n", gate->mpath->dst);
+			mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
+			from_mpath = gate->mpath;
+			copy = true;
+		} else {
+			mpath_dbg("Not forwarding %p\n", gate->mpath);
+			mpath_dbg("flags %x\n", gate->mpath->flags);
+		}
+	}
+
+	hlist_for_each_entry_rcu(gate, n, known_gates, list)
+		if (gate->mpath->sdata == sdata) {
+			mpath_dbg("Sending to %pM\n", gate->mpath->dst);
+			mesh_path_tx_pending(gate->mpath);
+		}
+
+	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
+}
+
+/**
  * mesh_path_discard_frame - discard a frame whose path could not be resolved
  *
  * @skb: frame to discard
@@ -699,18 +1004,23 @@ void mesh_path_discard_frame(struct sk_buff *skb,
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 	struct mesh_path *mpath;
 	u32 sn = 0;
+	__le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_NOFORWARD);
 
 	if (memcmp(hdr->addr4, sdata->vif.addr, ETH_ALEN) != 0) {
 		u8 *ra, *da;
 
 		da = hdr->addr3;
 		ra = hdr->addr1;
+		rcu_read_lock();
 		mpath = mesh_path_lookup(da, sdata);
-		if (mpath)
+		if (mpath) {
+			spin_lock_bh(&mpath->state_lock);
 			sn = ++mpath->sn;
+			spin_unlock_bh(&mpath->state_lock);
+		}
+		rcu_read_unlock();
 		mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, skb->data,
-				   cpu_to_le32(sn),
-				   cpu_to_le16(PERR_RCODE_NO_ROUTE), ra, sdata);
+				   cpu_to_le32(sn), reason, ra, sdata);
 	}
 
 	kfree_skb(skb);
@@ -728,8 +1038,7 @@ void mesh_path_flush_pending(struct mesh_path *mpath)
 {
 	struct sk_buff *skb;
 
-	while ((skb = skb_dequeue(&mpath->frame_queue)) &&
-			(mpath->flags & MESH_PATH_ACTIVE))
+	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
 		mesh_path_discard_frame(skb, mpath->sdata);
 }
 
@@ -790,6 +1099,7 @@ static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
 int mesh_pathtbl_init(void)
 {
 	struct mesh_table *tbl_path, *tbl_mpp;
+	int ret;
 
 	tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
 	if (!tbl_path)
@@ -797,21 +1107,40 @@ int mesh_pathtbl_init(void)
 	tbl_path->free_node = &mesh_path_node_free;
 	tbl_path->copy_node = &mesh_path_node_copy;
 	tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
+	tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
+	if (!tbl_path->known_gates) {
+		ret = -ENOMEM;
+		goto free_path;
+	}
+	INIT_HLIST_HEAD(tbl_path->known_gates);
+
 
 	tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
 	if (!tbl_mpp) {
-		mesh_table_free(tbl_path, true);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto free_path;
 	}
 	tbl_mpp->free_node = &mesh_path_node_free;
 	tbl_mpp->copy_node = &mesh_path_node_copy;
 	tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
+	tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
+	if (!tbl_mpp->known_gates) {
+		ret = -ENOMEM;
+		goto free_mpp;
+	}
+	INIT_HLIST_HEAD(tbl_mpp->known_gates);
 
 	/* Need no locking since this is during init */
 	RCU_INIT_POINTER(mesh_paths, tbl_path);
 	RCU_INIT_POINTER(mpp_paths, tbl_mpp);
 
 	return 0;
+
+free_mpp:
+	mesh_table_free(tbl_mpp, true);
+free_path:
+	mesh_table_free(tbl_path, true);
+	return ret;
 }
 
 void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
@@ -828,14 +1157,10 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
 		if (node->mpath->sdata != sdata)
 			continue;
 		mpath = node->mpath;
-		spin_lock_bh(&mpath->state_lock);
 		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
 		    (!(mpath->flags & MESH_PATH_FIXED)) &&
-		    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) {
-			spin_unlock_bh(&mpath->state_lock);
+		    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
 			mesh_path_del(mpath->dst, mpath->sdata);
-		} else
-			spin_unlock_bh(&mpath->state_lock);
 	}
 	rcu_read_unlock();
 }
@@ -843,6 +1168,6 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
 void mesh_pathtbl_unregister(void)
 {
 	/* no need for locking during exit path */
-	mesh_table_free(rcu_dereference_raw(mesh_paths), true);
-	mesh_table_free(rcu_dereference_raw(mpp_paths), true);
+	mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true);
+	mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true);
 }
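
Usage sketch (not part of the patch above): mesh_path_send_to_gates(), mesh_gate_num() and mesh_path_flush_pending() are real symbols from this file, but the caller below is hypothetical. It only illustrates how a path-resolution failure handler might fall back to the known-gates list introduced by this change.

/* Hypothetical caller -- illustrative only, not added by this patch. */
static void mesh_path_resolve_failed(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;

	/* If any gates were recorded via mesh_path_add_gate(), try to
	 * re-queue the pending frames towards one of them. */
	if (mesh_gate_num(sdata) > 0 &&
	    mesh_path_send_to_gates(mpath) == 0)
		return;

	/* No usable gate: drop whatever is still queued on this path. */
	mesh_path_flush_pending(mpath);
}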