 include/linux/ieee80211.h   |   4 +
 net/mac80211/ieee80211_i.h  |   1 +
 net/mac80211/mesh.c         |   3 +-
 net/mac80211/mesh.h         |  11 ++
 net/mac80211/mesh_hwmp.c    |  41 +++--
 net/mac80211/mesh_pathtbl.c | 284 +++++++++++++++++++++++++++++++++-
 6 files changed, 335 insertions(+), 9 deletions(-)
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 03cfbf393a63..37f95f2e10f9 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -633,6 +633,10 @@ struct ieee80211_rann_ie {
 	u32 rann_metric;
 } __attribute__ ((packed));
 
+enum ieee80211_rann_flags {
+	RANN_FLAG_IS_GATE = 1 << 0,
+};
+
 #define WLAN_SA_QUERY_TR_ID_LEN 2
 
 struct ieee80211_mgmt {
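
[Annotation: a minimal sketch, not part of the patch, of how the new flag is meant to be used. A root mesh STA that is also a gate would set the bit in the RANNs it originates, and receivers recover it the same way hwmp_rann_frame_process() does further down. The helper names here are hypothetical.]

	/* Illustrative helpers only -- the patch itself just tests the bit. */
	static void rann_mark_gate(struct ieee80211_rann_ie *rann)
	{
		rann->rann_flags |= RANN_FLAG_IS_GATE;
	}

	static bool rann_root_is_gate(const struct ieee80211_rann_ie *rann)
	{
		return !!(rann->rann_flags & RANN_FLAG_IS_GATE);
	}
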
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index ea7419050846..c204cee1189c 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -514,6 +514,7 @@ struct ieee80211_if_mesh {
 	struct mesh_config mshcfg;
 	u32 mesh_seqnum;
 	bool accepting_plinks;
+	int num_gates;
 	const u8 *ie;
 	u8 ie_len;
 	enum {
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index ecdde6ce4df0..e120fefb4e40 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -545,7 +545,7 @@ void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 
-	/* use atomic bitops in case both timers fire at the same time */
+	/* use atomic bitops in case all timers fire at the same time */
 
 	if (del_timer_sync(&ifmsh->housekeeping_timer))
 		set_bit(TMR_RUNNING_HK, &ifmsh->timers_running);
@@ -752,6 +752,7 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
 	ifmsh->accepting_plinks = true;
 	ifmsh->preq_id = 0;
 	ifmsh->sn = 0;
+	ifmsh->num_gates = 0;
 	atomic_set(&ifmsh->mpaths, 0);
 	mesh_rmc_init(sdata);
 	ifmsh->last_preq = jiffies;
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 3c7d0f8b376a..9d9116e1a9ac 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -81,6 +81,7 @@ enum mesh_deferred_task_flags {
 * @discovery_retries: number of discovery retries
 * @flags: mesh path flags, as specified on &enum mesh_path_flags
 * @state_lock: mesh path state lock
+ * @is_gate: the destination station of this path is a mesh gate
 *
 *
 * The combination of dst and sdata is unique in the mesh path table. Since the
@@ -104,6 +105,7 @@ struct mesh_path {
 	u8 discovery_retries;
 	enum mesh_path_flags flags;
 	spinlock_t state_lock;
+	bool is_gate;
 };
 
 /**
@@ -120,6 +122,9 @@ struct mesh_path {
 *	buckets
 * @mean_chain_len: maximum average length for the hash buckets' list, if it is
 *	reached, the table will grow
+ * @known_gates: list of known mesh gates and their mpaths by the station. The
+ *	gate's mpath may or may not be resolved and active.
+ *
 * rcu_head: RCU head to free the table
 */
 struct mesh_table {
@@ -133,6 +138,8 @@ struct mesh_table {
 	int (*copy_node) (struct hlist_node *p, struct mesh_table *newtbl);
 	int size_order;
 	int mean_chain_len;
+	struct hlist_head *known_gates;
+	spinlock_t gates_lock;
 
 	struct rcu_head rcu_head;
 };
@@ -236,6 +243,10 @@ void mesh_path_flush(struct ieee80211_sub_if_data *sdata);
 void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
 		struct ieee80211_mgmt *mgmt, size_t len);
 int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata);
+
+int mesh_path_add_gate(struct mesh_path *mpath);
+int mesh_path_send_to_gates(struct mesh_path *mpath);
+int mesh_gate_num(struct ieee80211_sub_if_data *sdata);
 /* Mesh plinks */
 void mesh_neighbour_update(u8 *hw_addr, u32 rates,
 		struct ieee80211_sub_if_data *sdata,
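
[Annotation: the three declarations above are the entire gate API exposed to the path-selection code. A sketch of the intended calling pattern, taken from what mesh_path_timer() in mesh_hwmp.c below actually does once path discovery finally fails:]

	/* Sketch: on discovery failure, try known gates before dropping. */
	if (!mpath->is_gate && mesh_gate_num(sdata) > 0) {
		if (mesh_path_send_to_gates(mpath))
			mhwmp_dbg("no gate was reachable");
	} else
		mesh_path_flush_pending(mpath);
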
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index abd03473cca4..7b517c46100d 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -696,6 +696,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
 	u8 *orig_addr;
 	u32 orig_sn, metric;
 	u32 interval = cpu_to_le32(IEEE80211_MESH_RANN_INTERVAL);
+	bool root_is_gate;
 
 	ttl = rann->rann_ttl;
 	if (ttl <= 1) {
@@ -704,12 +705,19 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
 	}
 	ttl--;
 	flags = rann->rann_flags;
+	root_is_gate = !!(flags & RANN_FLAG_IS_GATE);
 	orig_addr = rann->rann_addr;
 	orig_sn = rann->rann_seq;
 	hopcount = rann->rann_hopcount;
 	hopcount++;
 	metric = rann->rann_metric;
-	mhwmp_dbg("received RANN from %pM\n", orig_addr);
+
+	/* Ignore our own RANNs */
+	if (memcmp(orig_addr, sdata->vif.addr, ETH_ALEN) == 0)
+		return;
+
+	mhwmp_dbg("received RANN from %pM (is_gate=%d)", orig_addr,
+			root_is_gate);
 
 	rcu_read_lock();
 	mpath = mesh_path_lookup(orig_addr, sdata);
@@ -721,9 +729,16 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
 			sdata->u.mesh.mshstats.dropped_frames_no_route++;
 			return;
 		}
-		mesh_queue_preq(mpath,
-				PREQ_Q_F_START | PREQ_Q_F_REFRESH);
 	}
+
+	if ((!(mpath->flags & (MESH_PATH_ACTIVE | MESH_PATH_RESOLVING)) ||
+	     time_after(jiffies, mpath->exp_time - 1*HZ)) &&
+	     !(mpath->flags & MESH_PATH_FIXED)) {
+		mhwmp_dbg("%s time to refresh root mpath %pM", sdata->name,
+			  orig_addr);
+		mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
+	}
+
 	if (mpath->sn < orig_sn) {
 		mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
 				       cpu_to_le32(orig_sn),
@@ -733,6 +748,9 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
 				       0, sdata);
 		mpath->sn = orig_sn;
 	}
+	if (root_is_gate)
+		mesh_path_add_gate(mpath);
+
 	rcu_read_unlock();
 }
 
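[Annotation: the new refresh condition reads densely; an editor's paraphrase of the same logic:]

	/* Refresh the path to the root if:
	 *   - it is neither ACTIVE nor currently RESOLVING, or
	 *   - it expires within the next second (exp_time - 1*HZ),
	 * and it has not been FIXED by configuration.  The PREQ is queued
	 * with PREQ_Q_F_START | PREQ_Q_F_REFRESH, replacing the old
	 * behaviour of queueing it only when the mpath was first created.
	 */
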
@@ -994,25 +1012,32 @@ void mesh_path_timer(unsigned long data)
 {
 	struct mesh_path *mpath = (void *) data;
 	struct ieee80211_sub_if_data *sdata = mpath->sdata;
+	int ret;
 
 	if (sdata->local->quiescing)
 		return;
 
 	spin_lock_bh(&mpath->state_lock);
 	if (mpath->flags & MESH_PATH_RESOLVED ||
-			(!(mpath->flags & MESH_PATH_RESOLVING)))
+			(!(mpath->flags & MESH_PATH_RESOLVING))) {
 		mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED);
-	else if (mpath->discovery_retries < max_preq_retries(sdata)) {
+		spin_unlock_bh(&mpath->state_lock);
+	} else if (mpath->discovery_retries < max_preq_retries(sdata)) {
 		++mpath->discovery_retries;
 		mpath->discovery_timeout *= 2;
+		spin_unlock_bh(&mpath->state_lock);
 		mesh_queue_preq(mpath, 0);
 	} else {
 		mpath->flags = 0;
 		mpath->exp_time = jiffies;
-		mesh_path_flush_pending(mpath);
+		spin_unlock_bh(&mpath->state_lock);
+		if (!mpath->is_gate && mesh_gate_num(sdata) > 0) {
+			ret = mesh_path_send_to_gates(mpath);
+			if (ret)
+				mhwmp_dbg("no gate was reachable");
+		} else
+			mesh_path_flush_pending(mpath);
 	}
-
-	spin_unlock_bh(&mpath->state_lock);
 }
 
 void
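
[Annotation: note that the rewritten mesh_path_timer() now releases state_lock inside each branch rather than once at the end. mesh_queue_preq() and mesh_path_send_to_gates() take other locks (the PREQ queue lock, per-mpath frame_queue locks), so this presumably avoids holding state_lock across those calls. The resulting pattern, in sketch form:]

	spin_lock_bh(&mpath->state_lock);
	/* ... inspect and update mpath->flags under the lock ... */
	spin_unlock_bh(&mpath->state_lock);
	/* ... then call helpers that take their own locks ... */
	mesh_queue_preq(mpath, 0);
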
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index bcf7fee53b2c..75e4b6022b86 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -66,6 +66,8 @@ static inline struct mesh_table *resize_dereference_mpp_paths(void)
 			lockdep_is_held(&pathtbl_resize_lock));
 }
 
+static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath);
+
 /*
  * CAREFUL -- "tbl" must not be an expression,
  * in particular not an rcu_dereference(), since
@@ -109,6 +111,7 @@ static struct mesh_table *mesh_table_alloc(int size_order)
 		sizeof(newtbl->hash_rnd));
 	for (i = 0; i <= newtbl->hash_mask; i++)
 		spin_lock_init(&newtbl->hashwlock[i]);
+	spin_lock_init(&newtbl->gates_lock);
 
 	return newtbl;
 }
@@ -124,6 +127,7 @@ static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
 {
 	struct hlist_head *mesh_hash;
 	struct hlist_node *p, *q;
+	struct mpath_node *gate;
 	int i;
 
 	mesh_hash = tbl->hash_buckets;
@@ -135,6 +139,17 @@ static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
 		}
 		spin_unlock_bh(&tbl->hashwlock[i]);
 	}
+	if (free_leafs) {
+		spin_lock_bh(&tbl->gates_lock);
+		hlist_for_each_entry_safe(gate, p, q,
+					  tbl->known_gates, list) {
+			hlist_del(&gate->list);
+			kfree(gate);
+		}
+		kfree(tbl->known_gates);
+		spin_unlock_bh(&tbl->gates_lock);
+	}
+
 	__mesh_table_free(tbl);
 }
 
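[Annotation: the free_leafs guard matters because of the next hunk, where mesh_table_grow() shares the known_gates head between old and new tables; the gate list must only be torn down on final destruction, not on a resize. The ownership rule, as the editor reads it:]

	/* grow:      newtbl->known_gates = oldtbl->known_gates  (shared head)
	 * post-grow: mesh_table_free(oldtbl, false)  -- keep gate list
	 * teardown:  mesh_table_free(tbl, true)      -- free gate list too
	 */
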
@@ -152,6 +167,7 @@ static int mesh_table_grow(struct mesh_table *oldtbl,
 	newtbl->free_node = oldtbl->free_node;
 	newtbl->mean_chain_len = oldtbl->mean_chain_len;
 	newtbl->copy_node = oldtbl->copy_node;
+	newtbl->known_gates = oldtbl->known_gates;
 	atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));
 
 	oldhash = oldtbl->hash_buckets;
@@ -211,6 +227,111 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
 	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
 }
 
+static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
+			     struct mesh_path *gate_mpath)
+{
+	struct ieee80211_hdr *hdr;
+	struct ieee80211s_hdr *mshdr;
+	int mesh_hdrlen, hdrlen;
+	char *next_hop;
+
+	hdr = (struct ieee80211_hdr *) skb->data;
+	hdrlen = ieee80211_hdrlen(hdr->frame_control);
+	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
+
+	if (!(mshdr->flags & MESH_FLAGS_AE)) {
+		/* size of the fixed part of the mesh header */
+		mesh_hdrlen = 6;
+
+		/* make room for the two extended addresses */
+		skb_push(skb, 2 * ETH_ALEN);
+		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);
+
+		hdr = (struct ieee80211_hdr *) skb->data;
+
+		/* we preserve the previous mesh header and only add
+		 * the new addresses */
+		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
+		mshdr->flags = MESH_FLAGS_AE_A5_A6;
+		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
+		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
+	}
+
+	/* update next hop */
+	hdr = (struct ieee80211_hdr *) skb->data;
+	rcu_read_lock();
+	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
+	memcpy(hdr->addr1, next_hop, ETH_ALEN);
+	rcu_read_unlock();
+	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
+}
+
+/**
+ *
+ * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
+ *
+ * This function is used to transfer or copy frames from an unresolved mpath to
+ * a gate mpath. The function also adds the Address Extension field and
+ * updates the next hop.
+ *
+ * If a frame already has an Address Extension field, only the next hop and
+ * destination addresses are updated.
+ *
+ * The gate mpath must be an active mpath with a valid mpath->next_hop.
+ *
+ * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
+ * @from_mpath: The failed mpath
+ * @copy: When true, copy all the frames to the new mpath queue. When false,
+ * move them.
+ */
+static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
+				    struct mesh_path *from_mpath,
+				    bool copy)
+{
+	struct sk_buff *skb, *cp_skb;
+	struct sk_buff_head gateq, failq;
+	unsigned long flags;
+	int num_skbs;
+
+	BUG_ON(gate_mpath == from_mpath);
+	BUG_ON(!gate_mpath->next_hop);
+
+	__skb_queue_head_init(&gateq);
+	__skb_queue_head_init(&failq);
+
+	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
+	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
+	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
+
+	num_skbs = skb_queue_len(&failq);
+
+	while (num_skbs--) {
+		skb = __skb_dequeue(&failq);
+		if (copy)
+			cp_skb = skb_copy(skb, GFP_ATOMIC);
+
+		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
+		__skb_queue_tail(&gateq, skb);
+
+		if (copy && cp_skb)
+			__skb_queue_tail(&failq, cp_skb);
+	}
+
+	spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags);
+	skb_queue_splice(&gateq, &gate_mpath->frame_queue);
+	mpath_dbg("Mpath queue for gate %pM has %d frames\n",
+			gate_mpath->dst,
+			skb_queue_len(&gate_mpath->frame_queue));
+	spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags);
+
+	if (!copy)
+		return;
+
+	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
+	skb_queue_splice(&failq, &from_mpath->frame_queue);
+	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
+}
+
 
 /**
 * mesh_path_lookup - look up a path in the mesh path table
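[Annotation: a worked example of what prepare_for_gate() does to a queued frame, with made-up addresses. Suppose a frame for unresolved destination D, from mesh source S, is redirected to gate G whose next hop is N:]

	/* Before (no AE field):        After (MESH_FLAGS_AE_A5_A6 set):
	 *   addr1 = stale next hop       addr1  = N  (next hop toward gate)
	 *   addr3 = D (mesh DA)          addr3  = G  (gate is the new mesh DA)
	 *   addr4 = S (mesh SA)          eaddr1 = D, eaddr2 = S  (original
	 *                                endpoints, carried in the extended
	 *                                mesh header, presumably so the gate
	 *                                can recover them for delivery)
	 */
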
@@ -310,6 +431,109 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data
 	return NULL;
 }
 
+static void mesh_gate_node_reclaim(struct rcu_head *rp)
+{
+	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
+	kfree(node);
+}
+
+/**
+ * mesh_gate_add - mark mpath as path to a mesh gate and add to known_gates
+ * @tbl: table which contains known_gates list
+ * @mpath: mpath to known mesh gate
+ *
+ * Returns: 0 on success
+ *
+ */
+static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath)
+{
+	struct mpath_node *gate, *new_gate;
+	struct hlist_node *n;
+	int err;
+
+	rcu_read_lock();
+	tbl = rcu_dereference(tbl);
+
+	hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list)
+		if (gate->mpath == mpath) {
+			err = -EEXIST;
+			goto err_rcu;
+		}
+
+	new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
+	if (!new_gate) {
+		err = -ENOMEM;
+		goto err_rcu;
+	}
+
+	mpath->is_gate = true;
+	mpath->sdata->u.mesh.num_gates++;
+	new_gate->mpath = mpath;
+	spin_lock_bh(&tbl->gates_lock);
+	hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
+	spin_unlock_bh(&tbl->gates_lock);
+	rcu_read_unlock();
+	mpath_dbg("Mesh path (%s): Recorded new gate: %pM. %d known gates\n",
+		  mpath->sdata->name, mpath->dst,
+		  mpath->sdata->u.mesh.num_gates);
+	return 0;
+err_rcu:
+	rcu_read_unlock();
+	return err;
+}
+
+/**
+ * mesh_gate_del - remove a mesh gate from the list of known gates
+ * @tbl: table which holds our list of known gates
+ * @mpath: gate mpath
+ *
+ * Returns: 0 on success
+ *
+ * Locking: must be called inside rcu_read_lock() section
+ */
+static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
+{
+	struct mpath_node *gate;
+	struct hlist_node *p, *q;
+
+	tbl = rcu_dereference(tbl);
+
+	hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list)
+		if (gate->mpath == mpath) {
+			spin_lock_bh(&tbl->gates_lock);
+			hlist_del_rcu(&gate->list);
+			call_rcu(&gate->rcu, mesh_gate_node_reclaim);
+			spin_unlock_bh(&tbl->gates_lock);
+			mpath->sdata->u.mesh.num_gates--;
+			mpath->is_gate = false;
+			mpath_dbg("Mesh path (%s): Deleted gate: %pM. "
+				  "%d known gates\n", mpath->sdata->name,
+				  mpath->dst, mpath->sdata->u.mesh.num_gates);
+			break;
+		}
+
+	return 0;
+}
+
+/**
+ *
+ * mesh_path_add_gate - mark the given mpath as a path to a mesh gate
+ * @mpath: gate path to add to table
+ */
+int mesh_path_add_gate(struct mesh_path *mpath)
+{
+	return mesh_gate_add(mesh_paths, mpath);
+}
+
+/**
+ * mesh_gate_num - number of gates known to this interface
+ * @sdata: subif data
+ */
+int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
+{
+	return sdata->u.mesh.num_gates;
+}
+
 /**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @addr: destination address of the path (ETH_ALEN length)
@@ -655,6 +879,8 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
 		if (mpath->sdata == sdata &&
 		    memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
 			spin_lock_bh(&mpath->state_lock);
+			if (mpath->is_gate)
+				mesh_gate_del(tbl, mpath);
 			mpath->flags |= MESH_PATH_RESOLVING;
 			hlist_del_rcu(&node->list);
 			call_rcu(&node->rcu, mesh_path_node_reclaim);
@@ -688,6 +914,58 @@ void mesh_path_tx_pending(struct mesh_path *mpath)
 }
 
 /**
+ * mesh_path_send_to_gates - sends pending frames to all known mesh gates
+ *
+ * @mpath: mesh path whose queue will be emptied
+ *
+ * If there is only one gate, the frames are transferred from the failed mpath
+ * queue to that gate's queue. If there is more than one gate, the frames
+ * are copied from each gate to the next. After frames are copied, the
+ * mpath queues are emptied onto the transmission queue.
+ */
+int mesh_path_send_to_gates(struct mesh_path *mpath)
+{
+	struct ieee80211_sub_if_data *sdata = mpath->sdata;
+	struct hlist_node *n;
+	struct mesh_table *tbl;
+	struct mesh_path *from_mpath = mpath;
+	struct mpath_node *gate = NULL;
+	bool copy = false;
+	struct hlist_head *known_gates;
+
+	rcu_read_lock();
+	tbl = rcu_dereference(mesh_paths);
+	known_gates = tbl->known_gates;
+	rcu_read_unlock();
+
+	if (!known_gates)
+		return -EHOSTUNREACH;
+
+	hlist_for_each_entry_rcu(gate, n, known_gates, list) {
+		if (gate->mpath->sdata != sdata)
+			continue;
+
+		if (gate->mpath->flags & MESH_PATH_ACTIVE) {
+			mpath_dbg("Forwarding to %pM\n", gate->mpath->dst);
+			mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
+			from_mpath = gate->mpath;
+			copy = true;
+		} else {
+			mpath_dbg("Not forwarding %p\n", gate->mpath);
+			mpath_dbg("flags %x\n", gate->mpath->flags);
+		}
+	}
+
+	hlist_for_each_entry_rcu(gate, n, known_gates, list)
+		if (gate->mpath->sdata == sdata) {
+			mpath_dbg("Sending to %pM\n", gate->mpath->dst);
+			mesh_path_tx_pending(gate->mpath);
+		}
+
+	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
+}
+
+/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
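[Annotation: to make the move/copy dance concrete, a worked trace with three active gates G1, G2 and G3 (gate names made up):]

	/* from_mpath = failed mpath, copy = false initially:
	 *   G1: mesh_path_move_to_queue(G1, failed, false) -- frames MOVED
	 *       then from_mpath = G1, copy = true
	 *   G2: mesh_path_move_to_queue(G2, G1, true)      -- G1 keeps copies
	 *   G3: mesh_path_move_to_queue(G3, G2, true)      -- G2 keeps copies
	 * The second loop then calls mesh_path_tx_pending() on G1, G2 and G3,
	 * so every active gate transmits the full set of redirected frames.
	 */
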
@@ -804,6 +1082,9 @@ int mesh_pathtbl_init(void)
 	tbl_path->free_node = &mesh_path_node_free;
 	tbl_path->copy_node = &mesh_path_node_copy;
 	tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
+	tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
+	INIT_HLIST_HEAD(tbl_path->known_gates);
+
 
 	tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
 	if (!tbl_mpp) {
@@ -813,6 +1094,9 @@ int mesh_pathtbl_init(void)
 	tbl_mpp->free_node = &mesh_path_node_free;
 	tbl_mpp->copy_node = &mesh_path_node_copy;
 	tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
+	/* XXX: not needed */
+	tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
+	INIT_HLIST_HEAD(tbl_mpp->known_gates);
 
 	/* Need no locking since this is during init */
 	RCU_INIT_POINTER(mesh_paths, tbl_path);
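
[Annotation: the in-patch "XXX: not needed" comment suggests the MPP table never tracks gates; the allocation presumably exists only so mesh_table_free() and mesh_table_grow() can treat both tables uniformly. The invariant, as the editor reads it:]

	/* Both tables own a (possibly empty) known_gates head:
	 *   mesh_paths->known_gates : populated via mesh_path_add_gate()
	 *   mpp_paths->known_gates  : always empty (see XXX above)
	 * so shared helpers may dereference tbl->known_gates unconditionally.
	 */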