author	Luis Carlos Cobo <luisca@cozybit.com>	2008-02-23 09:17:14 -0500
committer	John W. Linville <linville@tuxdriver.com>	2008-03-06 15:30:42 -0500
commit	eb2b9311fd00a868e9bf85ab66e86b7dee1643e1 (patch)
tree	69167d3359e5bde25f51b8596b65ceae3ebb6a17
parent	c3896d2ca4dd97be290f000cb1079ed759d28574 (diff)
mac80211: mesh path table implementation
The mesh path table associates destinations with the next hop used to reach
them. The table is a hash of linked lists protected by RCU mechanisms, and
every mesh path contains a lock that protects the mesh path state. Each
outgoing mesh frame requires a lookup in this table, so the table has been
designed so that no lock needs to be held to find the appropriate next hop.
If the path is determined to be active within an RCU read-side section, we
can safely dereference mpath->next_hop->addr, since the path holds a
reference to the next hop sta. After a mesh path has been set active for the
first time, its next_hop must always point to a valid sta; if that is not
possible, the mpath must be deleted or replaced in an RCU-safe fashion.

Signed-off-by: Luis Carlos Cobo <luisca@cozybit.com>
Signed-off-by: Johannes Berg <johannes@sipsolutions.net>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
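To make the locking rules concrete, here is a minimal sketch (not part of the
patch) of how a tx-path caller would use the table; dst, dev and skb stand for
the frame being sent, and tx_to() is a hypothetical stand-in for the actual
transmit step:

	struct mesh_path *mpath;

	rcu_read_lock();
	mpath = mesh_path_lookup(dst, dev);
	if (mpath && (mpath->flags & MESH_PATH_ACTIVE))
		/* safe: an active path holds a reference to its next_hop */
		tx_to(mpath->next_hop->addr, skb);
	rcu_read_unlock();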
-rw-r--r--	net/mac80211/mesh_pathtbl.c	522
1 file changed, 522 insertions(+), 0 deletions(-)
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
new file mode 100644
index 000000000000..37094942e728
--- /dev/null
+++ b/net/mac80211/mesh_pathtbl.c
@@ -0,0 +1,522 @@
/*
 * Copyright (c) 2008 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "mesh.h"

/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER	2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN		2

#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
				time_after(jiffies, mpath->exp_time) && \
				!(mpath->flags & MESH_PATH_FIXED))

struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, useful when resizing
	 */
	struct mesh_path *mpath;
};

static struct mesh_table *mesh_paths;

/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. When reading the table (i.e. doing lookups) we are well protected
 * by RCU
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);

/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
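	/* Take a reference on the new next hop before dropping the old one,
	 * so the path never points at a freed sta.
	 */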
	__sta_info_get(sta);
	if (mpath->next_hop)
		sta_info_put(mpath->next_hop);
	mpath->next_hop = sta;
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @dev: local interface
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup(u8 *dst, struct net_device *dev)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mesh_table *tbl;
	struct mpath_node *node;

	tbl = rcu_dereference(mesh_paths);

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, dev, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->dev == dev &&
				memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
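			/* Lockless expiry test first, then re-check under
			 * state_lock before clearing the active flag, since
			 * another CPU may have updated the path meanwhile.
			 */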
			if (MPATH_EXPIRED(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				if (MPATH_EXPIRED(mpath))
					mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}

/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @dev: local interface
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup_by_idx(int idx, struct net_device *dev)
{
	struct mpath_node *node;
	struct hlist_node *p;
	int i;
	int j = 0;

	for_each_mesh_entry(mesh_paths, p, node, i)
		if (j++ == idx) {
			if (MPATH_EXPIRED(node->mpath)) {
				spin_lock_bh(&node->mpath->state_lock);
				if (MPATH_EXPIRED(node->mpath))
					node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}

	return NULL;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @dev: local interface
 *
 * Returns: 0 on success
 *
 * State: the initial state of the new path is set to 0
 */
int mesh_path_add(u8 *dst, struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (memcmp(dst, dev->dev_addr, ETH_ALEN) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	if (atomic_add_unless(&sdata->u.sta.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return -ENOSPC;

	read_lock(&pathtbl_resize_lock);

	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL);
	if (!new_mpath) {
		atomic_dec(&sdata->u.sta.mpaths);
		err = -ENOMEM;
		goto endadd2;
	}
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	new_mpath->dev = dev;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
	if (!new_node) {
		/* allocation failed: drop the new path and report -ENOMEM */
		kfree(new_mpath);
		atomic_dec(&sdata->u.sta.mpaths);
		err = -ENOMEM;
		goto endadd2;
	}
	new_node->mpath = new_mpath;
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	hash_idx = mesh_table_hash(dst, dev, mesh_paths);
	bucket = &mesh_paths->hash_buckets[hash_idx];

	spin_lock(&mesh_paths->hashwlock[hash_idx]);

	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->dev == dev && memcmp(dst, mpath->dst, ETH_ALEN)
				== 0) {
			err = -EEXIST;
			atomic_dec(&sdata->u.sta.mpaths);
			kfree(new_node);
			kfree(new_mpath);
			goto endadd;
		}
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&mesh_paths->entries) >=
		mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1))
		grow = 1;

endadd:
	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
endadd2:
	read_unlock(&pathtbl_resize_lock);
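	/* Grow outside the per-bucket locks: allocate a larger table, copy
	 * the nodes over, publish it with rcu_assign_pointer() and free the
	 * old table once all readers are done. free_leafs is false because
	 * the mesh_path structures themselves are shared with the new table.
	 */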
	if (!err && grow) {
		struct mesh_table *oldtbl, *newtbl;

		write_lock(&pathtbl_resize_lock);
		oldtbl = mesh_paths;
		newtbl = mesh_table_grow(mesh_paths);
		if (!newtbl) {
			write_unlock(&pathtbl_resize_lock);
			return -ENOMEM;
		}
		rcu_assign_pointer(mesh_paths, newtbl);
		synchronize_rcu();
		mesh_table_free(oldtbl, false);
		write_unlock(&pathtbl_resize_lock);
	}
	return err;
}

/**
 * mesh_plink_broken - deactivates paths and sends PERR when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	struct net_device *dev = sta->dev;
	int i;

	rcu_read_lock();
	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		spin_lock_bh(&mpath->state_lock);
		if (mpath->next_hop == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->dsn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(mpath->dst,
					cpu_to_le32(mpath->dsn),
					dev->broadcast, dev);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	rcu_read_unlock();
}

/**
 * mesh_path_flush_by_nexthop - deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from ESTAB
 * to any other state, since ESTAB state is the only one that allows path
 * creation. This will happen before the sta can be freed (since we hold
 * a reference to it) so any reader in an RCU read block will be protected
 * against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	/* hold the RCU read lock while walking the table, as the other
	 * table iterators in this file do */
	rcu_read_lock();
	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		if (mpath->next_hop == sta)
			mesh_path_del(mpath->dst, mpath->dev);
	}
	rcu_read_unlock();
}

void mesh_path_flush(struct net_device *dev)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	/* hold the RCU read lock while walking the table, as the other
	 * table iterators in this file do */
	rcu_read_lock();
	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		if (mpath->dev == dev)
			mesh_path_del(mpath->dst, mpath->dev);
	}
	rcu_read_unlock();
}

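/* RCU callback: runs after a grace period, once no reader can still be
 * traversing the unlinked node, so the next hop reference and the memory
 * can be released safely.
 */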
static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata =
		IEEE80211_DEV_TO_SUB_IF(node->mpath->dev);
	if (node->mpath->next_hop)
		sta_info_put(node->mpath->next_hop);
	atomic_dec(&sdata->u.sta.mpaths);
	kfree(node->mpath);
	kfree(node);
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @dev: local interface
 *
 * Returns: 0 if successful
 *
 * State: if the path is being resolved, the deletion will be postponed until
 * the path resolution completes or times out.
 */
int mesh_path_del(u8 *addr, struct net_device *dev)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int hash_idx;
	int err = 0;

	read_lock(&pathtbl_resize_lock);
	hash_idx = mesh_table_hash(addr, dev, mesh_paths);
	bucket = &mesh_paths->hash_buckets[hash_idx];

	spin_lock(&mesh_paths->hashwlock[hash_idx]);
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->dev == dev &&
				memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
			spin_lock_bh(&mpath->state_lock);
			if (mpath->flags & MESH_PATH_RESOLVING) {
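				/* Resolution in progress: just mark the path
				 * for deletion and let removal happen when
				 * resolution completes or times out.
				 */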
				mpath->flags |= MESH_PATH_DELETE;
			} else {
				mpath->flags |= MESH_PATH_RESOLVING;
				hlist_del_rcu(&node->list);
				call_rcu(&node->rcu, mesh_path_node_reclaim);
				atomic_dec(&mesh_paths->entries);
			}
			spin_unlock_bh(&mpath->state_lock);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
	read_unlock(&pathtbl_resize_lock);
	return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL) {
		if (!(mpath->flags & MESH_PATH_ACTIVE)) {
			/* the path went down mid-flush: put the frame back
			 * instead of leaking it */
			skb_queue_head(&mpath->frame_queue, skb);
			break;
		}
		dev_queue_xmit(skb);
	}
}

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @dev: network device the frame was to be sent through
 *
 * If the frame was being forwarded from another MP, a PERR frame will be sent
 * to the precursor.
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_discard_frame(struct sk_buff *skb, struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct mesh_path *mpath;
	u32 dsn = 0;

	if (skb->pkt_type == PACKET_OTHERHOST) {
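		/* The frame was being forwarded on behalf of another mesh
		 * point: bump the destination sequence number and report the
		 * broken path back to the transmitter with a PERR.
		 */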
		struct ieee80211s_hdr *prev_meshhdr;
		int mshhdrlen;
		u8 *ra, *da;

		prev_meshhdr = ((struct ieee80211s_hdr *)skb->cb);
		mshhdrlen = ieee80211_get_mesh_hdrlen(prev_meshhdr);
		da = skb->data;
		ra = MESH_PREQ(skb);
		mpath = mesh_path_lookup(da, dev);
		if (mpath)
			dsn = ++mpath->dsn;
		mesh_path_error_tx(skb->data, cpu_to_le32(dsn), ra, dev);
	}

	kfree_skb(skb);
	sdata->u.sta.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	/* discard every frame queued while the path was unresolved */
	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(skb, mpath->dev);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: the state_lock of the mpath structure must NOT be held when
 * calling this function; it is taken internally.
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
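	/* Reset the path selection state so the forced hop takes precedence
	 * over anything learned from path discovery, then release the frames
	 * that were queued while the path was unresolved.
	 */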
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->dsn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	mesh_path_tx_pending(mpath);
}

static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mesh_path *mpath;
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	hlist_del_rcu(p);
	synchronize_rcu();
	if (free_leafs)
		kfree(mpath);
	kfree(node);
}

static void mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
	struct mesh_path *mpath;
	struct mpath_node *node, *new_node;
	u32 hash_idx;

	node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
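	/* Only the container node is duplicated; the mesh_path itself is
	 * shared between the old and the new table (see struct mpath_node).
	 */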
	new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
	if (!new_node)
		return; /* out of memory: the path is dropped from the new table */
	new_node->mpath = mpath;
	hash_idx = mesh_table_hash(mpath->dst, mpath->dev, newtbl);
	hlist_add_head(&new_node->list,
			&newtbl->hash_buckets[hash_idx]);
}

int mesh_pathtbl_init(void)
{
	mesh_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!mesh_paths)
		return -ENOMEM;
	mesh_paths->free_node = &mesh_path_node_free;
	mesh_paths->copy_node = &mesh_path_node_copy;
	mesh_paths->mean_chain_len = MEAN_CHAIN_LEN;
	return 0;
}

void mesh_path_expire(struct net_device *dev)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	read_lock(&pathtbl_resize_lock);
	for_each_mesh_entry(mesh_paths, p, node, i) {
		if (node->mpath->dev != dev)
			continue;
		mpath = node->mpath;
		spin_lock_bh(&mpath->state_lock);
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		    time_after(jiffies,
		     mpath->exp_time + MESH_PATH_EXPIRE)) {
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_del(mpath->dst, mpath->dev);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	read_unlock(&pathtbl_resize_lock);
}

void mesh_pathtbl_unregister(void)
{
	mesh_table_free(mesh_paths, true);
}