-rw-r--r--  Documentation/networking/batman-adv.txt  |   14
-rw-r--r--  net/batman-adv/Kconfig                   |    2
-rw-r--r--  net/batman-adv/Makefile                  |    1
-rw-r--r--  net/batman-adv/bat_sysfs.c               |    2
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.c   | 1296
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.h   |   37
-rw-r--r--  net/batman-adv/hard-interface.c          |   18
-rw-r--r--  net/batman-adv/main.c                    |    6
-rw-r--r--  net/batman-adv/main.h                    |    6
-rw-r--r--  net/batman-adv/originator.c              |    1
-rw-r--r--  net/batman-adv/packet.h                  |   17
-rw-r--r--  net/batman-adv/routing.c                 |    7
-rw-r--r--  net/batman-adv/soft-interface.c          |   12
-rw-r--r--  net/batman-adv/types.h                   |   27
14 files changed, 1430 insertions(+), 16 deletions(-)
diff --git a/Documentation/networking/batman-adv.txt b/Documentation/networking/batman-adv.txt
index 221ad0cdf11f..9523e9ed19e9 100644
--- a/Documentation/networking/batman-adv.txt
+++ b/Documentation/networking/batman-adv.txt
@@ -67,10 +67,11 @@ To deactivate an interface you have to write "none" into its
 All mesh wide settings can be found in batman's own interface
 folder:
 
 # ls /sys/class/net/bat0/mesh/
-# aggregated_ogms        fragmentation   gw_sel_class    vis_mode
-# ap_isolation           gw_bandwidth    hop_penalty
+# aggregated_ogms        fragmentation   hop_penalty
+# ap_isolation           gw_bandwidth    log_level
 # bonding                gw_mode         orig_interval
+# bridge_loop_avoidance  gw_sel_class    vis_mode
 
 
 There is a special folder for debugging information:
@@ -202,12 +203,13 @@ abled during run time. Following log_levels are defined:
 1 - Enable messages related to routing / flooding / broadcasting
 2 - Enable messages related to route added / changed / deleted
 4 - Enable messages related to translation table operations
-7 - Enable all messages
+8 - Enable messages related to bridge loop avoidance
+15 - enable all messages
 
 The debug output can be changed at runtime using the file
 /sys/class/net/bat0/mesh/log_level. e.g.
 
-# echo 2 > /sys/class/net/bat0/mesh/log_level
+# echo 6 > /sys/class/net/bat0/mesh/log_level
 
 will enable debug messages for when routes change.
 
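The log_level value is a bitmask, so any combination of the message classes listed above can be selected; this patch widens it from 3 to 4 bits (maximum 15). A minimal, illustrative C sketch of how the bits combine, using the DBG_* values that main.h defines further down in this patch:

#include <stdio.h>

/* mirror of enum dbg_level from net/batman-adv/main.h after this patch */
enum dbg_level {
	DBG_BATMAN = 1 << 0,	/* routing / flooding / broadcasting */
	DBG_ROUTES = 1 << 1,	/* route added / changed / deleted */
	DBG_TT     = 1 << 2,	/* translation table operations */
	DBG_BLA    = 1 << 3,	/* bridge loop avoidance */
	DBG_ALL    = 15
};

int main(void)
{
	/* the "echo 6" example above is DBG_ROUTES | DBG_TT */
	printf("routes+tt     = %d\n", DBG_ROUTES | DBG_TT);
	/* add the new bridge loop avoidance messages on top of that */
	printf("routes+tt+bla = %d\n", DBG_ROUTES | DBG_TT | DBG_BLA);
	/* everything */
	printf("all           = %d\n", DBG_ALL);
	return 0;
}
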
diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig
index a04d28f392e6..6ff977c1f3bc 100644
--- a/net/batman-adv/Kconfig
+++ b/net/batman-adv/Kconfig
@@ -4,7 +4,7 @@
 
 config BATMAN_ADV
 	tristate "B.A.T.M.A.N. Advanced Meshing Protocol"
-	depends on NET
+	depends on NET && INET
 	select CRC16
 	default n
 	help
diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile
index 4e392ebedb64..94b67fd81bf7 100644
--- a/net/batman-adv/Makefile
+++ b/net/batman-adv/Makefile
@@ -23,6 +23,7 @@ batman-adv-y += bat_debugfs.o
 batman-adv-y += bat_iv_ogm.o
 batman-adv-y += bat_sysfs.o
 batman-adv-y += bitarray.o
+batman-adv-y += bridge_loop_avoidance.o
 batman-adv-y += gateway_client.o
 batman-adv-y += gateway_common.o
 batman-adv-y += hard-interface.o
diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c
index 68ff759fc304..d12757fbe10a 100644
--- a/net/batman-adv/bat_sysfs.c
+++ b/net/batman-adv/bat_sysfs.c
@@ -398,7 +398,7 @@ BAT_ATTR_UINT(gw_sel_class, S_IRUGO | S_IWUSR, 1, TQ_MAX_VALUE,
 static BAT_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, show_gw_bwidth,
 		store_gw_bwidth);
 #ifdef CONFIG_BATMAN_ADV_DEBUG
-BAT_ATTR_UINT(log_level, S_IRUGO | S_IWUSR, 0, 7, NULL);
+BAT_ATTR_UINT(log_level, S_IRUGO | S_IWUSR, 0, 15, NULL);
 #endif
 
 static struct bat_attribute *mesh_attrs[] = {
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
new file mode 100644
index 000000000000..f84c892be7ea
--- /dev/null
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -0,0 +1,1296 @@
1/*
2 * Copyright (C) 2011 B.A.T.M.A.N. contributors:
3 *
4 * Simon Wunderlich
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA
19 *
20 */
21
22#include "main.h"
23#include "hash.h"
24#include "hard-interface.h"
25#include "originator.h"
26#include "bridge_loop_avoidance.h"
27#include "send.h"
28
29#include <linux/etherdevice.h>
30#include <linux/crc16.h>
31#include <linux/if_arp.h>
32#include <net/arp.h>
33#include <linux/if_vlan.h>
34
35static const uint8_t claim_dest[6] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
36static const uint8_t announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
37
38static void bla_periodic_work(struct work_struct *work);
39static void bla_send_announce(struct bat_priv *bat_priv,
40 struct backbone_gw *backbone_gw);
41
42/* return the index of the claim */
43static inline uint32_t choose_claim(const void *data, uint32_t size)
44{
45 const unsigned char *key = data;
46 uint32_t hash = 0;
47 size_t i;
48
49 for (i = 0; i < ETH_ALEN + sizeof(short); i++) {
50 hash += key[i];
51 hash += (hash << 10);
52 hash ^= (hash >> 6);
53 }
54
55 hash += (hash << 3);
56 hash ^= (hash >> 11);
57 hash += (hash << 15);
58
59 return hash % size;
60}
61
62/* return the index of the backbone gateway */
63static inline uint32_t choose_backbone_gw(const void *data, uint32_t size)
64{
65 const unsigned char *key = data;
66 uint32_t hash = 0;
67 size_t i;
68
69 for (i = 0; i < ETH_ALEN + sizeof(short); i++) {
70 hash += key[i];
71 hash += (hash << 10);
72 hash ^= (hash >> 6);
73 }
74
75 hash += (hash << 3);
76 hash ^= (hash >> 11);
77 hash += (hash << 15);
78
79 return hash % size;
80}
81
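Both index functions above run the classic Jenkins one-at-a-time mix over the first ETH_ALEN + sizeof(short) bytes of the entry, which relies on the MAC address and the VLAN ID being the leading members of struct claim and struct backbone_gw (see the types.h hunk at the end of this patch). A standalone sketch of the same hashing steps, for illustration only and not part of the diff:

#include <stdint.h>
#include <stddef.h>

#define ETH_ALEN 6

/* same key layout the kernel structs use: MAC first, then the VLAN ID */
struct bla_key {
	uint8_t addr[ETH_ALEN];
	short vid;
};

/* Jenkins one-at-a-time over addr + vid, reduced modulo the table size */
static uint32_t bla_hash(const struct bla_key *key, uint32_t size)
{
	const unsigned char *data = (const unsigned char *)key;
	uint32_t hash = 0;
	size_t i;

	for (i = 0; i < ETH_ALEN + sizeof(short); i++) {
		hash += data[i];
		hash += (hash << 10);
		hash ^= (hash >> 6);
	}

	hash += (hash << 3);
	hash ^= (hash >> 11);
	hash += (hash << 15);

	return hash % size;
}
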
82
83/* compares address and vid of two backbone gws */
84static int compare_backbone_gw(const struct hlist_node *node, const void *data2)
85{
86 const void *data1 = container_of(node, struct backbone_gw,
87 hash_entry);
88
89 return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0);
90}
91
92/* compares address and vid of two claims */
93static int compare_claim(const struct hlist_node *node, const void *data2)
94{
95 const void *data1 = container_of(node, struct claim,
96 hash_entry);
97
98 return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0);
99}
100
101/* free a backbone gw */
102static void backbone_gw_free_ref(struct backbone_gw *backbone_gw)
103{
104 if (atomic_dec_and_test(&backbone_gw->refcount))
105 kfree_rcu(backbone_gw, rcu);
106}
107
108/* finally deinitialize the claim */
109static void claim_free_rcu(struct rcu_head *rcu)
110{
111 struct claim *claim;
112
113 claim = container_of(rcu, struct claim, rcu);
114
115 backbone_gw_free_ref(claim->backbone_gw);
116 kfree(claim);
117}
118
119/* free a claim, call claim_free_rcu if it's the last reference */
120static void claim_free_ref(struct claim *claim)
121{
122 if (atomic_dec_and_test(&claim->refcount))
123 call_rcu(&claim->rcu, claim_free_rcu);
124}
125
126/**
127 * @bat_priv: the bat priv with all the soft interface information
128 * @data: search data (may be local/static data)
129 *
130 * looks for a claim in the hash, and returns it if found
131 * or NULL otherwise.
132 */
133static struct claim *claim_hash_find(struct bat_priv *bat_priv,
134 struct claim *data)
135{
136 struct hashtable_t *hash = bat_priv->claim_hash;
137 struct hlist_head *head;
138 struct hlist_node *node;
139 struct claim *claim;
140 struct claim *claim_tmp = NULL;
141 int index;
142
143 if (!hash)
144 return NULL;
145
146 index = choose_claim(data, hash->size);
147 head = &hash->table[index];
148
149 rcu_read_lock();
150 hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
151 if (!compare_claim(&claim->hash_entry, data))
152 continue;
153
154 if (!atomic_inc_not_zero(&claim->refcount))
155 continue;
156
157 claim_tmp = claim;
158 break;
159 }
160 rcu_read_unlock();
161
162 return claim_tmp;
163}
164
165/**
166 * @bat_priv: the bat priv with all the soft interface information
167 * @addr: the address of the originator
168 * @vid: the VLAN ID
169 *
170 * looks for a backbone gateway in the hash, and returns it if found
171 * or NULL otherwise.
172 */
173static struct backbone_gw *backbone_hash_find(struct bat_priv *bat_priv,
174 uint8_t *addr, short vid)
175{
176 struct hashtable_t *hash = bat_priv->backbone_hash;
177 struct hlist_head *head;
178 struct hlist_node *node;
179 struct backbone_gw search_entry, *backbone_gw;
180 struct backbone_gw *backbone_gw_tmp = NULL;
181 int index;
182
183 if (!hash)
184 return NULL;
185
186 memcpy(search_entry.orig, addr, ETH_ALEN);
187 search_entry.vid = vid;
188
189 index = choose_backbone_gw(&search_entry, hash->size);
190 head = &hash->table[index];
191
192 rcu_read_lock();
193 hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
194 if (!compare_backbone_gw(&backbone_gw->hash_entry,
195 &search_entry))
196 continue;
197
198 if (!atomic_inc_not_zero(&backbone_gw->refcount))
199 continue;
200
201 backbone_gw_tmp = backbone_gw;
202 break;
203 }
204 rcu_read_unlock();
205
206 return backbone_gw_tmp;
207}
208
209/* delete all claims for a backbone */
210static void bla_del_backbone_claims(struct backbone_gw *backbone_gw)
211{
212 struct hashtable_t *hash;
213 struct hlist_node *node, *node_tmp;
214 struct hlist_head *head;
215 struct claim *claim;
216 int i;
217 spinlock_t *list_lock; /* protects write access to the hash lists */
218
219 hash = backbone_gw->bat_priv->claim_hash;
220 if (!hash)
221 return;
222
223 for (i = 0; i < hash->size; i++) {
224 head = &hash->table[i];
225 list_lock = &hash->list_locks[i];
226
227 spin_lock_bh(list_lock);
228 hlist_for_each_entry_safe(claim, node, node_tmp,
229 head, hash_entry) {
230
231 if (claim->backbone_gw != backbone_gw)
232 continue;
233
234 claim_free_ref(claim);
235 hlist_del_rcu(node);
236 }
237 spin_unlock_bh(list_lock);
238 }
239
240	/* all claims gone, initialize CRC */
241 backbone_gw->crc = BLA_CRC_INIT;
242}
243
244/**
245 * @bat_priv: the bat priv with all the soft interface information
246 * @mac: the mac address to be announced within the claim
247 * @vid: the VLAN ID
248 * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
249 *
250 * sends a claim frame according to the provided info.
251 */
252static void bla_send_claim(struct bat_priv *bat_priv, uint8_t *mac,
253 short vid, int claimtype)
254{
255 struct sk_buff *skb;
256 struct ethhdr *ethhdr;
257 struct hard_iface *primary_if;
258 struct net_device *soft_iface;
259 uint8_t *hw_src;
260 struct bla_claim_dst local_claim_dest;
261 uint32_t zeroip = 0;
262
263 primary_if = primary_if_get_selected(bat_priv);
264 if (!primary_if)
265 return;
266
267 memcpy(&local_claim_dest, claim_dest, sizeof(local_claim_dest));
268 local_claim_dest.type = claimtype;
269
270 soft_iface = primary_if->soft_iface;
271
272 skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
273 /* IP DST: 0.0.0.0 */
274 zeroip,
275 primary_if->soft_iface,
276 /* IP SRC: 0.0.0.0 */
277 zeroip,
278 /* Ethernet DST: Broadcast */
279 NULL,
280 /* Ethernet SRC/HW SRC: originator mac */
281 primary_if->net_dev->dev_addr,
282 /* HW DST: FF:43:05:XX:00:00
283 * with XX = claim type
284 */
285 (uint8_t *)&local_claim_dest);
286
287 if (!skb)
288 goto out;
289
290 ethhdr = (struct ethhdr *)skb->data;
291 hw_src = (uint8_t *)ethhdr +
292 sizeof(struct ethhdr) +
293 sizeof(struct arphdr);
294
295 /* now we pretend that the client would have sent this ... */
296 switch (claimtype) {
297 case CLAIM_TYPE_ADD:
298 /* normal claim frame
299 * set Ethernet SRC to the clients mac
300 */
301 memcpy(ethhdr->h_source, mac, ETH_ALEN);
302 bat_dbg(DBG_BLA, bat_priv,
303 "bla_send_claim(): CLAIM %pM on vid %d\n", mac, vid);
304 break;
305 case CLAIM_TYPE_DEL:
306 /* unclaim frame
307 * set HW SRC to the clients mac
308 */
309 memcpy(hw_src, mac, ETH_ALEN);
310 bat_dbg(DBG_BLA, bat_priv,
311 "bla_send_claim(): UNCLAIM %pM on vid %d\n", mac, vid);
312 break;
313 case CLAIM_TYPE_ANNOUNCE:
314 /* announcement frame
315		 * set HW SRC to the special mac containing the crc
316 */
317 memcpy(hw_src, mac, ETH_ALEN);
318 bat_dbg(DBG_BLA, bat_priv,
319 "bla_send_claim(): ANNOUNCE of %pM on vid %d\n",
320 ethhdr->h_source, vid);
321 break;
322 case CLAIM_TYPE_REQUEST:
323 /* request frame
324		 * set HW SRC to the special mac containing the crc
325 */
326 memcpy(hw_src, mac, ETH_ALEN);
327 memcpy(ethhdr->h_dest, mac, ETH_ALEN);
328 bat_dbg(DBG_BLA, bat_priv,
329			"bla_send_claim(): REQUEST of %pM to %pM on vid %d\n",
330 ethhdr->h_source, ethhdr->h_dest, vid);
331 break;
332
333 }
334
335 if (vid != -1)
336 skb = vlan_insert_tag(skb, vid);
337
338 skb_reset_mac_header(skb);
339 skb->protocol = eth_type_trans(skb, soft_iface);
340 bat_priv->stats.rx_packets++;
341 bat_priv->stats.rx_bytes += skb->len + sizeof(struct ethhdr);
342 soft_iface->last_rx = jiffies;
343
344 netif_rx(skb);
345out:
346 if (primary_if)
347 hardif_free_ref(primary_if);
348}
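The frame built above is a regular ARP reply whose destination hardware address does the signalling: bytes 0-2 carry the ff:43:05 magic, byte 3 the claim type, and bytes 4-5 a group id that this patch still leaves at zero. A small userspace sketch of that encoding, mirroring struct bla_claim_dst from the packet.h hunk below (illustration only, not part of the diff):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* mirrors struct bla_claim_dst added to packet.h by this patch */
struct bla_claim_dst {
	uint8_t magic[3];	/* FF:43:05 */
	uint8_t type;		/* bla_claimframe */
	uint16_t group;		/* group id, left at zero in this patch */
} __attribute__((packed));

enum bla_claimframe {
	CLAIM_TYPE_ADD      = 0x00,
	CLAIM_TYPE_DEL      = 0x01,
	CLAIM_TYPE_ANNOUNCE = 0x02,
	CLAIM_TYPE_REQUEST  = 0x03
};

static const uint8_t claim_dest[6] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};

/* build the 6-byte "hardware destination" that bla_send_claim() hands
 * to arp_create(): template first, then the claim type patched in
 */
static void build_claim_dest(struct bla_claim_dst *dst, uint8_t type)
{
	memcpy(dst, claim_dest, sizeof(*dst));
	dst->type = type;
}

int main(void)
{
	struct bla_claim_dst dst;
	const uint8_t *raw = (const uint8_t *)&dst;

	build_claim_dest(&dst, CLAIM_TYPE_ANNOUNCE);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       raw[0], raw[1], raw[2], raw[3], raw[4], raw[5]);
	return 0;
}
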
349
350/**
351 * @bat_priv: the bat priv with all the soft interface information
352 * @orig: the mac address of the originator
353 * @vid: the VLAN ID
354 *
355 * searches for the backbone gw or creates a new one if it could not
356 * be found.
357 */
358static struct backbone_gw *bla_get_backbone_gw(struct bat_priv *bat_priv,
359 uint8_t *orig, short vid)
360{
361 struct backbone_gw *entry;
362 int hash_added;
363
364 entry = backbone_hash_find(bat_priv, orig, vid);
365
366 if (entry)
367 return entry;
368
369 bat_dbg(DBG_BLA, bat_priv,
370 "bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n",
371 orig, vid);
372
373 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
374 if (!entry)
375 return NULL;
376
377 entry->vid = vid;
378 entry->lasttime = jiffies;
379 entry->crc = BLA_CRC_INIT;
380 entry->bat_priv = bat_priv;
381 atomic_set(&entry->request_sent, 0);
382 memcpy(entry->orig, orig, ETH_ALEN);
383
384 /* one for the hash, one for returning */
385 atomic_set(&entry->refcount, 2);
386
387 hash_added = hash_add(bat_priv->backbone_hash, compare_backbone_gw,
388 choose_backbone_gw, entry, &entry->hash_entry);
389
390 if (unlikely(hash_added != 0)) {
391 /* hash failed, free the structure */
392 kfree(entry);
393 return NULL;
394 }
395
396 return entry;
397}
398
399/* update or add the own backbone gw to make sure we announce
400 * where we receive other backbone gws
401 */
402static void bla_update_own_backbone_gw(struct bat_priv *bat_priv,
403 struct hard_iface *primary_if,
404 short vid)
405{
406 struct backbone_gw *backbone_gw;
407
408 backbone_gw = bla_get_backbone_gw(bat_priv,
409 primary_if->net_dev->dev_addr, vid);
410 if (unlikely(!backbone_gw))
411 return;
412
413 backbone_gw->lasttime = jiffies;
414 backbone_gw_free_ref(backbone_gw);
415}
416
417/**
418 * @bat_priv: the bat priv with all the soft interface information
419 * @vid: the vid where the request came on
420 *
421 * Repeat all of our own claims, and finally send an ANNOUNCE frame
422 * to allow the requester another check if the CRC is correct now.
423 */
424static void bla_answer_request(struct bat_priv *bat_priv,
425 struct hard_iface *primary_if, short vid)
426{
427 struct hlist_node *node;
428 struct hlist_head *head;
429 struct hashtable_t *hash;
430 struct claim *claim;
431 struct backbone_gw *backbone_gw;
432 int i;
433
434 bat_dbg(DBG_BLA, bat_priv,
435 "bla_answer_request(): received a claim request, send all of our own claims again\n");
436
437 backbone_gw = backbone_hash_find(bat_priv,
438 primary_if->net_dev->dev_addr, vid);
439 if (!backbone_gw)
440 return;
441
442 hash = bat_priv->claim_hash;
443 for (i = 0; i < hash->size; i++) {
444 head = &hash->table[i];
445
446 rcu_read_lock();
447 hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
448 /* only own claims are interesting */
449 if (claim->backbone_gw != backbone_gw)
450 continue;
451
452 bla_send_claim(bat_priv, claim->addr, claim->vid,
453 CLAIM_TYPE_ADD);
454 }
455 rcu_read_unlock();
456 }
457
458 /* finally, send an announcement frame */
459 bla_send_announce(bat_priv, backbone_gw);
460 backbone_gw_free_ref(backbone_gw);
461}
462
463/**
464 * @backbone_gw: the backbone gateway from whom we are out of sync
465 *
466 * When the crc is wrong, ask the backbone gateway for a full table update.
467 * After the request, it will repeat all of its own claims and finally
468 * send an announcement claim with which we can check again.
469 */
470static void bla_send_request(struct backbone_gw *backbone_gw)
471{
472 /* first, remove all old entries */
473 bla_del_backbone_claims(backbone_gw);
474
475 bat_dbg(DBG_BLA, backbone_gw->bat_priv,
476 "Sending REQUEST to %pM\n",
477 backbone_gw->orig);
478
479 /* send request */
480 bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
481 backbone_gw->vid, CLAIM_TYPE_REQUEST);
482
483 /* no local broadcasts should be sent or received, for now. */
484 if (!atomic_read(&backbone_gw->request_sent)) {
485 atomic_inc(&backbone_gw->bat_priv->bla_num_requests);
486 atomic_set(&backbone_gw->request_sent, 1);
487 }
488}
489
490/**
491 * @bat_priv: the bat priv with all the soft interface information
492 * @backbone_gw: our backbone gateway which should be announced
493 *
494 * This function sends an announcement. It is called from multiple
495 * places.
496 */
497static void bla_send_announce(struct bat_priv *bat_priv,
498 struct backbone_gw *backbone_gw)
499{
500 uint8_t mac[ETH_ALEN];
501 uint16_t crc;
502
503 memcpy(mac, announce_mac, 4);
504 crc = htons(backbone_gw->crc);
505 memcpy(&mac[4], (uint8_t *)&crc, 2);
506
507 bla_send_claim(bat_priv, mac, backbone_gw->vid, CLAIM_TYPE_ANNOUNCE);
508
509}
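The announcement reuses the claim's HW SRC field as a tiny payload: the first four bytes are the fixed 43:05:43:05 marker, the last two carry the claim checksum in network byte order, which handle_announce() below extracts again. A rough userspace sketch of both directions (the kernel code casts directly instead of using memcpy; this is an illustration only, not part of the diff):

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* htons/ntohs */

#define ETH_ALEN 6

static const uint8_t announce_mac[4] = {0x43, 0x05, 0x43, 0x05};

/* pack the announcement address the way bla_send_announce() does:
 * 43:05:43:05:<crc high>:<crc low>
 */
static void pack_announce(uint8_t mac[ETH_ALEN], uint16_t claim_crc)
{
	uint16_t crc = htons(claim_crc);

	memcpy(mac, announce_mac, 4);
	memcpy(&mac[4], &crc, 2);
}

/* recover the checksum on the receiving side, as handle_announce() does */
static uint16_t unpack_announce_crc(const uint8_t mac[ETH_ALEN])
{
	uint16_t crc;

	memcpy(&crc, &mac[4], 2);
	return ntohs(crc);
}
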
510
511/**
512 * @bat_priv: the bat priv with all the soft interface information
513 * @mac: the mac address of the claim
514 * @vid: the VLAN ID of the frame
515 * @backbone_gw: the backbone gateway which claims it
516 *
517 * Adds a claim in the claim hash.
518 */
519static void bla_add_claim(struct bat_priv *bat_priv, const uint8_t *mac,
520 const short vid, struct backbone_gw *backbone_gw)
521{
522 struct claim *claim;
523 struct claim search_claim;
524 int hash_added;
525
526 memcpy(search_claim.addr, mac, ETH_ALEN);
527 search_claim.vid = vid;
528 claim = claim_hash_find(bat_priv, &search_claim);
529
530 /* create a new claim entry if it does not exist yet. */
531 if (!claim) {
532 claim = kzalloc(sizeof(*claim), GFP_ATOMIC);
533 if (!claim)
534 return;
535
536 memcpy(claim->addr, mac, ETH_ALEN);
537 claim->vid = vid;
538 claim->lasttime = jiffies;
539 claim->backbone_gw = backbone_gw;
540
541 atomic_set(&claim->refcount, 2);
542 bat_dbg(DBG_BLA, bat_priv,
543 "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
544 mac, vid);
545 hash_added = hash_add(bat_priv->claim_hash, compare_claim,
546 choose_claim, claim, &claim->hash_entry);
547
548 if (unlikely(hash_added != 0)) {
549 /* only local changes happened. */
550 kfree(claim);
551 return;
552 }
553 } else {
554 claim->lasttime = jiffies;
555 if (claim->backbone_gw == backbone_gw)
556 /* no need to register a new backbone */
557 goto claim_free_ref;
558
559 bat_dbg(DBG_BLA, bat_priv,
560 "bla_add_claim(): changing ownership for %pM, vid %d\n",
561 mac, vid);
562
563 claim->backbone_gw->crc ^=
564 crc16(0, claim->addr, ETH_ALEN);
565 backbone_gw_free_ref(claim->backbone_gw);
566
567 }
568 /* set (new) backbone gw */
569 atomic_inc(&backbone_gw->refcount);
570 claim->backbone_gw = backbone_gw;
571
572 backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
573 backbone_gw->lasttime = jiffies;
574
575claim_free_ref:
576 claim_free_ref(claim);
577}
578
579/* Delete a claim from the claim hash which has the
580 * given mac address and vid.
581 */
582static void bla_del_claim(struct bat_priv *bat_priv, const uint8_t *mac,
583 const short vid)
584{
585 struct claim search_claim, *claim;
586
587 memcpy(search_claim.addr, mac, ETH_ALEN);
588 search_claim.vid = vid;
589 claim = claim_hash_find(bat_priv, &search_claim);
590 if (!claim)
591 return;
592
593 bat_dbg(DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n", mac, vid);
594
595 hash_remove(bat_priv->claim_hash, compare_claim, choose_claim, claim);
596 claim_free_ref(claim); /* reference from the hash is gone */
597
598 claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
599
600 /* don't need the reference from hash_find() anymore */
601 claim_free_ref(claim);
602}
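bla_add_claim() and bla_del_claim() keep backbone_gw->crc as the XOR of crc16() over every claimed MAC address: because XOR is its own inverse and order-independent, adding and removing a claim use the exact same operation, and two gateways can compare their whole claim tables with a single 16-bit value. A userspace sketch of that bookkeeping; the bit-wise crc16() below is only a stand-in for the kernel's <linux/crc16.h> routine (illustration only, not part of the diff):

#include <stdint.h>
#include <stddef.h>

#define ETH_ALEN 6

/* bit-wise CRC-16 (poly 0x8005, reflected), standing in for linux/crc16.h */
static uint16_t crc16(uint16_t crc, const uint8_t *buf, size_t len)
{
	int i;

	while (len--) {
		crc ^= *buf++;
		for (i = 0; i < 8; i++)
			crc = (crc & 1) ? (crc >> 1) ^ 0xA001 : crc >> 1;
	}
	return crc;
}

/* the same XOR toggles a claim into and out of the running checksum,
 * so adding and deleting a claim never require rescanning the table
 */
static void claim_crc_toggle(uint16_t *backbone_crc,
			     const uint8_t mac[ETH_ALEN])
{
	*backbone_crc ^= crc16(0, mac, ETH_ALEN);
}
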
603
604/* check for ANNOUNCE frame, return 1 if handled */
605static int handle_announce(struct bat_priv *bat_priv,
606 uint8_t *an_addr, uint8_t *backbone_addr, short vid)
607{
608 struct backbone_gw *backbone_gw;
609 uint16_t crc;
610
611 if (memcmp(an_addr, announce_mac, 4) != 0)
612 return 0;
613
614 backbone_gw = bla_get_backbone_gw(bat_priv, backbone_addr, vid);
615
616 if (unlikely(!backbone_gw))
617 return 1;
618
619
620 /* handle as ANNOUNCE frame */
621 backbone_gw->lasttime = jiffies;
622 crc = ntohs(*((uint16_t *)(&an_addr[4])));
623
624 bat_dbg(DBG_BLA, bat_priv,
625 "handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %04x\n",
626 vid, backbone_gw->orig, crc);
627
628 if (backbone_gw->crc != crc) {
629 bat_dbg(DBG_BLA, backbone_gw->bat_priv,
630 "handle_announce(): CRC FAILED for %pM/%d (my = %04x, sent = %04x)\n",
631 backbone_gw->orig, backbone_gw->vid, backbone_gw->crc,
632 crc);
633
634 bla_send_request(backbone_gw);
635 } else {
636 /* if we have sent a request and the crc was OK,
637 * we can allow traffic again.
638 */
639 if (atomic_read(&backbone_gw->request_sent)) {
640 atomic_dec(&backbone_gw->bat_priv->bla_num_requests);
641 atomic_set(&backbone_gw->request_sent, 0);
642 }
643 }
644
645 backbone_gw_free_ref(backbone_gw);
646 return 1;
647}
648
649/* check for REQUEST frame, return 1 if handled */
650static int handle_request(struct bat_priv *bat_priv,
651 struct hard_iface *primary_if,
652 uint8_t *backbone_addr,
653 struct ethhdr *ethhdr, short vid)
654{
655 /* check for REQUEST frame */
656 if (!compare_eth(backbone_addr, ethhdr->h_dest))
657 return 0;
658
659 /* sanity check, this should not happen on a normal switch,
660 * we ignore it in this case.
661 */
662 if (!compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
663 return 1;
664
665 bat_dbg(DBG_BLA, bat_priv,
666 "handle_request(): REQUEST vid %d (sent by %pM)...\n",
667 vid, ethhdr->h_source);
668
669 bla_answer_request(bat_priv, primary_if, vid);
670 return 1;
671}
672
673/* check for UNCLAIM frame, return 1 if handled */
674static int handle_unclaim(struct bat_priv *bat_priv,
675 struct hard_iface *primary_if,
676 uint8_t *backbone_addr,
677 uint8_t *claim_addr, short vid)
678{
679 struct backbone_gw *backbone_gw;
680
681 /* unclaim in any case if it is our own */
682 if (primary_if && compare_eth(backbone_addr,
683 primary_if->net_dev->dev_addr))
684 bla_send_claim(bat_priv, claim_addr, vid, CLAIM_TYPE_DEL);
685
686 backbone_gw = backbone_hash_find(bat_priv, backbone_addr, vid);
687
688 if (!backbone_gw)
689 return 1;
690
691 /* this must be an UNCLAIM frame */
692 bat_dbg(DBG_BLA, bat_priv,
693 "handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n",
694 claim_addr, vid, backbone_gw->orig);
695
696 bla_del_claim(bat_priv, claim_addr, vid);
697 backbone_gw_free_ref(backbone_gw);
698 return 1;
699}
700
701/* check for CLAIM frame, return 1 if handled */
702static int handle_claim(struct bat_priv *bat_priv,
703 struct hard_iface *primary_if, uint8_t *backbone_addr,
704 uint8_t *claim_addr, short vid)
705{
706 struct backbone_gw *backbone_gw;
707
708 /* register the gateway if not yet available, and add the claim. */
709
710 backbone_gw = bla_get_backbone_gw(bat_priv, backbone_addr, vid);
711
712 if (unlikely(!backbone_gw))
713 return 1;
714
715 /* this must be a CLAIM frame */
716 bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
717 if (compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
718 bla_send_claim(bat_priv, claim_addr, vid, CLAIM_TYPE_ADD);
719
720 /* TODO: we could call something like tt_local_del() here. */
721
722 backbone_gw_free_ref(backbone_gw);
723 return 1;
724}
725
726/**
727 * @bat_priv: the bat priv with all the soft interface information
728 * @skb: the frame to be checked
729 *
730 * Check if this is a claim frame, and process it accordingly.
731 *
732 * returns 1 if it was a claim frame, otherwise return 0 to
733 * tell the callee that it can use the frame on its own.
734 */
735static int bla_process_claim(struct bat_priv *bat_priv,
736 struct hard_iface *primary_if,
737 struct sk_buff *skb)
738{
739 struct ethhdr *ethhdr;
740 struct vlan_ethhdr *vhdr;
741 struct arphdr *arphdr;
742 uint8_t *hw_src, *hw_dst;
743 struct bla_claim_dst *bla_dst;
744 uint16_t proto;
745 int headlen;
746 short vid = -1;
747
748 ethhdr = (struct ethhdr *)skb_mac_header(skb);
749
750 if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
751 vhdr = (struct vlan_ethhdr *)ethhdr;
752 vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
753 proto = ntohs(vhdr->h_vlan_encapsulated_proto);
754 headlen = sizeof(*vhdr);
755 } else {
756 proto = ntohs(ethhdr->h_proto);
757 headlen = sizeof(*ethhdr);
758 }
759
760 if (proto != ETH_P_ARP)
761 return 0; /* not a claim frame */
762
763	/* this must be an ARP frame. check if it is a claim. */
764
765 if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev))))
766 return 0;
767
768 /* pskb_may_pull() may have modified the pointers, get ethhdr again */
769 ethhdr = (struct ethhdr *)skb_mac_header(skb);
770 arphdr = (struct arphdr *)((uint8_t *)ethhdr + headlen);
771
772 /* Check whether the ARP frame carries a valid
773 * IP information
774 */
775
776 if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
777 return 0;
778 if (arphdr->ar_pro != htons(ETH_P_IP))
779 return 0;
780 if (arphdr->ar_hln != ETH_ALEN)
781 return 0;
782 if (arphdr->ar_pln != 4)
783 return 0;
784
785 hw_src = (uint8_t *)arphdr + sizeof(struct arphdr);
786 hw_dst = hw_src + ETH_ALEN + 4;
787 bla_dst = (struct bla_claim_dst *)hw_dst;
788
789 /* check if it is a claim frame. */
790 if (memcmp(hw_dst, claim_dest, 3) != 0)
791 return 0;
792
793 /* become a backbone gw ourselves on this vlan if not happened yet */
794 bla_update_own_backbone_gw(bat_priv, primary_if, vid);
795
796 /* check for the different types of claim frames ... */
797 switch (bla_dst->type) {
798 case CLAIM_TYPE_ADD:
799 if (handle_claim(bat_priv, primary_if, hw_src,
800 ethhdr->h_source, vid))
801 return 1;
802 break;
803 case CLAIM_TYPE_DEL:
804 if (handle_unclaim(bat_priv, primary_if,
805 ethhdr->h_source, hw_src, vid))
806 return 1;
807 break;
808
809 case CLAIM_TYPE_ANNOUNCE:
810 if (handle_announce(bat_priv, hw_src, ethhdr->h_source, vid))
811 return 1;
812 break;
813 case CLAIM_TYPE_REQUEST:
814 if (handle_request(bat_priv, primary_if, hw_src, ethhdr, vid))
815 return 1;
816 break;
817 }
818
819 bat_dbg(DBG_BLA, bat_priv,
820 "bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
821 ethhdr->h_source, vid, hw_src, hw_dst);
822 return 1;
823}
824
825/* Check when we last heard from other nodes, and remove them in case of
826 * a time out, or clean all backbone gws if now is set.
827 */
828static void bla_purge_backbone_gw(struct bat_priv *bat_priv, int now)
829{
830 struct backbone_gw *backbone_gw;
831 struct hlist_node *node, *node_tmp;
832 struct hlist_head *head;
833 struct hashtable_t *hash;
834 spinlock_t *list_lock; /* protects write access to the hash lists */
835 int i;
836
837 hash = bat_priv->backbone_hash;
838 if (!hash)
839 return;
840
841 for (i = 0; i < hash->size; i++) {
842 head = &hash->table[i];
843 list_lock = &hash->list_locks[i];
844
845 spin_lock_bh(list_lock);
846 hlist_for_each_entry_safe(backbone_gw, node, node_tmp,
847 head, hash_entry) {
848 if (now)
849 goto purge_now;
850 if (!has_timed_out(backbone_gw->lasttime,
851 BLA_BACKBONE_TIMEOUT))
852 continue;
853
854 bat_dbg(DBG_BLA, backbone_gw->bat_priv,
855 "bla_purge_backbone_gw(): backbone gw %pM timed out\n",
856 backbone_gw->orig);
857
858purge_now:
859 /* don't wait for the pending request anymore */
860 if (atomic_read(&backbone_gw->request_sent))
861 atomic_dec(&bat_priv->bla_num_requests);
862
863 bla_del_backbone_claims(backbone_gw);
864
865 hlist_del_rcu(node);
866 backbone_gw_free_ref(backbone_gw);
867 }
868 spin_unlock_bh(list_lock);
869 }
870}
871
872/**
873 * @bat_priv: the bat priv with all the soft interface information
874 * @primary_if: the selected primary interface, may be NULL if now is set
875 * @now: whether the whole hash shall be wiped now
876 *
877 * Check when we last heard from our own claims, and remove them in case of
878 * a time out, or clean all claims if now is set
879 */
880static void bla_purge_claims(struct bat_priv *bat_priv,
881 struct hard_iface *primary_if, int now)
882{
883 struct claim *claim;
884 struct hlist_node *node;
885 struct hlist_head *head;
886 struct hashtable_t *hash;
887 int i;
888
889 hash = bat_priv->claim_hash;
890 if (!hash)
891 return;
892
893 for (i = 0; i < hash->size; i++) {
894 head = &hash->table[i];
895
896 rcu_read_lock();
897 hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
898 if (now)
899 goto purge_now;
900 if (!compare_eth(claim->backbone_gw->orig,
901 primary_if->net_dev->dev_addr))
902 continue;
903 if (!has_timed_out(claim->lasttime,
904 BLA_CLAIM_TIMEOUT))
905 continue;
906
907 bat_dbg(DBG_BLA, bat_priv,
908 "bla_purge_claims(): %pM, vid %d, time out\n",
909 claim->addr, claim->vid);
910
911purge_now:
912 handle_unclaim(bat_priv, primary_if,
913 claim->backbone_gw->orig,
914 claim->addr, claim->vid);
915 }
916 rcu_read_unlock();
917 }
918}
919
920/**
921 * @bat_priv: the bat priv with all the soft interface information
922 * @primary_if: the new selected primary_if
923 * @oldif: the old primary interface, may be NULL
924 *
925 * Update the backbone gateways when the own orig address changes.
926 *
927 */
928void bla_update_orig_address(struct bat_priv *bat_priv,
929 struct hard_iface *primary_if,
930 struct hard_iface *oldif)
931{
932 struct backbone_gw *backbone_gw;
933 struct hlist_node *node;
934 struct hlist_head *head;
935 struct hashtable_t *hash;
936 int i;
937
938 if (!oldif) {
939 bla_purge_claims(bat_priv, NULL, 1);
940 bla_purge_backbone_gw(bat_priv, 1);
941 return;
942 }
943
944 hash = bat_priv->backbone_hash;
945 if (!hash)
946 return;
947
948 for (i = 0; i < hash->size; i++) {
949 head = &hash->table[i];
950
951 rcu_read_lock();
952 hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
953 /* own orig still holds the old value. */
954 if (!compare_eth(backbone_gw->orig,
955 oldif->net_dev->dev_addr))
956 continue;
957
958 memcpy(backbone_gw->orig,
959 primary_if->net_dev->dev_addr, ETH_ALEN);
960 /* send an announce frame so others will ask for our
961 * claims and update their tables.
962 */
963 bla_send_announce(bat_priv, backbone_gw);
964 }
965 rcu_read_unlock();
966 }
967}
968
969
970
971/* (re)start the timer */
972static void bla_start_timer(struct bat_priv *bat_priv)
973{
974 INIT_DELAYED_WORK(&bat_priv->bla_work, bla_periodic_work);
975 queue_delayed_work(bat_event_workqueue, &bat_priv->bla_work,
976 msecs_to_jiffies(BLA_PERIOD_LENGTH));
977}
978
979/* periodic work to do:
980 * * purge structures when they are too old
981 * * send announcements
982 */
983static void bla_periodic_work(struct work_struct *work)
984{
985 struct delayed_work *delayed_work =
986 container_of(work, struct delayed_work, work);
987 struct bat_priv *bat_priv =
988 container_of(delayed_work, struct bat_priv, bla_work);
989 struct hlist_node *node;
990 struct hlist_head *head;
991 struct backbone_gw *backbone_gw;
992 struct hashtable_t *hash;
993 struct hard_iface *primary_if;
994 int i;
995
996 primary_if = primary_if_get_selected(bat_priv);
997 if (!primary_if)
998 goto out;
999
1000 bla_purge_claims(bat_priv, primary_if, 0);
1001 bla_purge_backbone_gw(bat_priv, 0);
1002
1003 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1004 goto out;
1005
1006 hash = bat_priv->backbone_hash;
1007 if (!hash)
1008 goto out;
1009
1010 for (i = 0; i < hash->size; i++) {
1011 head = &hash->table[i];
1012
1013 rcu_read_lock();
1014 hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
1015 if (!compare_eth(backbone_gw->orig,
1016 primary_if->net_dev->dev_addr))
1017 continue;
1018
1019 backbone_gw->lasttime = jiffies;
1020
1021 bla_send_announce(bat_priv, backbone_gw);
1022 }
1023 rcu_read_unlock();
1024 }
1025out:
1026 if (primary_if)
1027 hardif_free_ref(primary_if);
1028
1029 bla_start_timer(bat_priv);
1030}
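The timer above fires every BLA_PERIOD_LENGTH milliseconds and drives both the announcements and the purging: a foreign backbone gateway survives three missed periods, a locally held claim ten. A rough sketch of that relationship using the constants this patch adds to main.h; has_timed_out() is batman-adv's jiffies-based helper, the millisecond version here is only an approximation and not part of the diff:

#include <stdbool.h>
#include <stdint.h>

/* constants introduced in main.h by this patch (milliseconds) */
#define BLA_PERIOD_LENGTH	10000	/* announce/purge period: 10 seconds */
#define BLA_BACKBONE_TIMEOUT	(BLA_PERIOD_LENGTH * 3)
#define BLA_CLAIM_TIMEOUT	(BLA_PERIOD_LENGTH * 10)

/* userspace stand-in for has_timed_out(): the kernel version compares
 * jiffies, here plain millisecond timestamps are used
 */
static bool has_timed_out(uint64_t last_seen_ms, uint64_t now_ms,
			  unsigned int timeout_ms)
{
	return now_ms - last_seen_ms > timeout_ms;
}

/* a backbone gw that missed three announcement periods is purged */
static bool backbone_gw_expired(uint64_t lasttime_ms, uint64_t now_ms)
{
	return has_timed_out(lasttime_ms, now_ms, BLA_BACKBONE_TIMEOUT);
}

/* our own claims survive ten quiet periods before being unclaimed again */
static bool claim_expired(uint64_t lasttime_ms, uint64_t now_ms)
{
	return has_timed_out(lasttime_ms, now_ms, BLA_CLAIM_TIMEOUT);
}
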
1031
1032/* initialize all bla structures */
1033int bla_init(struct bat_priv *bat_priv)
1034{
1035 bat_dbg(DBG_BLA, bat_priv, "bla hash registering\n");
1036
1037 if (bat_priv->claim_hash)
1038 return 1;
1039
1040 bat_priv->claim_hash = hash_new(128);
1041 bat_priv->backbone_hash = hash_new(32);
1042
1043 if (!bat_priv->claim_hash || !bat_priv->backbone_hash)
1044 return -1;
1045
1046 bat_dbg(DBG_BLA, bat_priv, "bla hashes initialized\n");
1047
1048 bla_start_timer(bat_priv);
1049 return 1;
1050}
1051
1052/**
1053 * @skb: the frame to be checked
1054 * @orig_node: the orig_node of the frame
1055 * @hdr_size: maximum length of the frame
1056 *
1057 * bla_is_backbone_gw inspects the skb for the VLAN ID and returns 1
1058 * if the orig_node is also a gateway on the soft interface, otherwise it
1059 * returns 0.
1060 *
1061 */
1062int bla_is_backbone_gw(struct sk_buff *skb,
1063 struct orig_node *orig_node, int hdr_size)
1064{
1065 struct ethhdr *ethhdr;
1066 struct vlan_ethhdr *vhdr;
1067 struct backbone_gw *backbone_gw;
1068 short vid = -1;
1069
1070 if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
1071 return 0;
1072
1073 /* first, find out the vid. */
1074 if (!pskb_may_pull(skb, hdr_size + sizeof(struct ethhdr)))
1075 return 0;
1076
1077 ethhdr = (struct ethhdr *)(((uint8_t *)skb->data) + hdr_size);
1078
1079 if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
1080 if (!pskb_may_pull(skb, hdr_size + sizeof(struct vlan_ethhdr)))
1081 return 0;
1082
1083 vhdr = (struct vlan_ethhdr *)(((uint8_t *)skb->data) +
1084 hdr_size);
1085 vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
1086 }
1087
1088 /* see if this originator is a backbone gw for this VLAN */
1089
1090 backbone_gw = backbone_hash_find(orig_node->bat_priv,
1091 orig_node->orig, vid);
1092 if (!backbone_gw)
1093 return 0;
1094
1095 backbone_gw_free_ref(backbone_gw);
1096 return 1;
1097}
1098
1099/* free all bla structures (for softinterface free or module unload) */
1100void bla_free(struct bat_priv *bat_priv)
1101{
1102 struct hard_iface *primary_if;
1103
1104 cancel_delayed_work_sync(&bat_priv->bla_work);
1105 primary_if = primary_if_get_selected(bat_priv);
1106
1107 if (bat_priv->claim_hash) {
1108 bla_purge_claims(bat_priv, primary_if, 1);
1109 hash_destroy(bat_priv->claim_hash);
1110 bat_priv->claim_hash = NULL;
1111 }
1112 if (bat_priv->backbone_hash) {
1113 bla_purge_backbone_gw(bat_priv, 1);
1114 hash_destroy(bat_priv->backbone_hash);
1115 bat_priv->backbone_hash = NULL;
1116 }
1117 if (primary_if)
1118 hardif_free_ref(primary_if);
1119}
1120
1121/**
1122 * @bat_priv: the bat priv with all the soft interface information
1123 * @skb: the frame to be checked
1124 * @vid: the VLAN ID of the frame
1125 *
1126 * bla_rx checks if:
1127 * * we have to race for a claim
1128 * * the frame is allowed on the LAN
1129 *
1130 * in these cases, the skb is further handled by this function and
1131 * returns 1, otherwise it returns 0 and the caller shall further
1132 * process the skb.
1133 *
1134 */
1135int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
1136{
1137 struct ethhdr *ethhdr;
1138 struct claim search_claim, *claim = NULL;
1139 struct hard_iface *primary_if;
1140 int ret;
1141
1142 ethhdr = (struct ethhdr *)skb_mac_header(skb);
1143
1144 primary_if = primary_if_get_selected(bat_priv);
1145 if (!primary_if)
1146 goto handled;
1147
1148 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1149 goto allow;
1150
1151
1152 if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
1153 /* don't allow broadcasts while requests are in flight */
1154 if (is_multicast_ether_addr(ethhdr->h_dest))
1155 goto handled;
1156
1157 memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
1158 search_claim.vid = vid;
1159 claim = claim_hash_find(bat_priv, &search_claim);
1160
1161 if (!claim) {
1162 /* possible optimization: race for a claim */
1163 /* No claim exists yet, claim it for us!
1164 */
1165 handle_claim(bat_priv, primary_if,
1166 primary_if->net_dev->dev_addr,
1167 ethhdr->h_source, vid);
1168 goto allow;
1169 }
1170
1171 /* if it is our own claim ... */
1172 if (compare_eth(claim->backbone_gw->orig,
1173 primary_if->net_dev->dev_addr)) {
1174 /* ... allow it in any case */
1175 claim->lasttime = jiffies;
1176 goto allow;
1177 }
1178
1179 /* if it is a broadcast ... */
1180 if (is_multicast_ether_addr(ethhdr->h_dest)) {
1181 /* ... drop it. the responsible gateway is in charge. */
1182 goto handled;
1183 } else {
1184 /* seems the client considers us as its best gateway.
1185 * send a claim and update the claim table
1186 * immediately.
1187 */
1188 handle_claim(bat_priv, primary_if,
1189 primary_if->net_dev->dev_addr,
1190 ethhdr->h_source, vid);
1191 goto allow;
1192 }
1193allow:
1194 bla_update_own_backbone_gw(bat_priv, primary_if, vid);
1195 ret = 0;
1196 goto out;
1197
1198handled:
1199 kfree_skb(skb);
1200 ret = 1;
1201
1202out:
1203 if (primary_if)
1204 hardif_free_ref(primary_if);
1205 if (claim)
1206 claim_free_ref(claim);
1207 return ret;
1208}
1209
1210/**
1211 * @bat_priv: the bat priv with all the soft interface information
1212 * @skb: the frame to be checked
1213 * @vid: the VLAN ID of the frame
1214 *
1215 * bla_tx checks if:
1216 * * a claim was received which has to be processed
1217 * * the frame is allowed on the mesh
1218 *
1219 * in these cases, the skb is further handled by this function and
1220 * returns 1, otherwise it returns 0 and the caller shall further
1221 * process the skb.
1222 *
1223 */
1224int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
1225{
1226 struct ethhdr *ethhdr;
1227 struct claim search_claim, *claim = NULL;
1228 struct hard_iface *primary_if;
1229 int ret = 0;
1230
1231 primary_if = primary_if_get_selected(bat_priv);
1232 if (!primary_if)
1233 goto out;
1234
1235 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1236 goto allow;
1237
1238 /* in VLAN case, the mac header might not be set. */
1239 skb_reset_mac_header(skb);
1240
1241 if (bla_process_claim(bat_priv, primary_if, skb))
1242 goto handled;
1243
1244 ethhdr = (struct ethhdr *)skb_mac_header(skb);
1245
1246 if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
1247 /* don't allow broadcasts while requests are in flight */
1248 if (is_multicast_ether_addr(ethhdr->h_dest))
1249 goto handled;
1250
1251 memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
1252 search_claim.vid = vid;
1253
1254 claim = claim_hash_find(bat_priv, &search_claim);
1255
1256 /* if no claim exists, allow it. */
1257 if (!claim)
1258 goto allow;
1259
1260 /* check if we are responsible. */
1261 if (compare_eth(claim->backbone_gw->orig,
1262 primary_if->net_dev->dev_addr)) {
1263 /* if yes, the client has roamed and we have
1264 * to unclaim it.
1265 */
1266 handle_unclaim(bat_priv, primary_if,
1267 primary_if->net_dev->dev_addr,
1268 ethhdr->h_source, vid);
1269 goto allow;
1270 }
1271
1272 /* check if it is a multicast/broadcast frame */
1273 if (is_multicast_ether_addr(ethhdr->h_dest)) {
1274 /* drop it. the responsible gateway has forwarded it into
1275 * the backbone network.
1276 */
1277 goto handled;
1278 } else {
1279 /* we must allow it. at least if we are
1280 * responsible for the DESTINATION.
1281 */
1282 goto allow;
1283 }
1284allow:
1285 bla_update_own_backbone_gw(bat_priv, primary_if, vid);
1286 ret = 0;
1287 goto out;
1288handled:
1289 ret = 1;
1290out:
1291 if (primary_if)
1292 hardif_free_ref(primary_if);
1293 if (claim)
1294 claim_free_ref(claim);
1295 return ret;
1296}
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
new file mode 100644
index 000000000000..488d77895d01
--- /dev/null
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -0,0 +1,37 @@
1/*
2 * Copyright (C) 2011 B.A.T.M.A.N. contributors:
3 *
4 * Simon Wunderlich
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA
19 *
20 */
21
22#ifndef _NET_BATMAN_ADV_BLA_H_
23#define _NET_BATMAN_ADV_BLA_H_
24
25int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid);
26int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid);
27int bla_is_backbone_gw(struct sk_buff *skb,
28 struct orig_node *orig_node, int hdr_size);
29void bla_update_orig_address(struct bat_priv *bat_priv,
30 struct hard_iface *primary_if,
31 struct hard_iface *oldif);
32int bla_init(struct bat_priv *bat_priv);
33void bla_free(struct bat_priv *bat_priv);
34
35#define BLA_CRC_INIT 0
36
37#endif /* ifndef _NET_BATMAN_ADV_BLA_H_ */
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 377897701a85..8c4b790b98be 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -28,6 +28,7 @@
 #include "bat_sysfs.h"
 #include "originator.h"
 #include "hash.h"
+#include "bridge_loop_avoidance.h"
 
 #include <linux/if_arp.h>
 
@@ -107,7 +108,8 @@ out:
 	return hard_iface;
 }
 
-static void primary_if_update_addr(struct bat_priv *bat_priv)
+static void primary_if_update_addr(struct bat_priv *bat_priv,
+				   struct hard_iface *oldif)
 {
 	struct vis_packet *vis_packet;
 	struct hard_iface *primary_if;
@@ -122,6 +124,7 @@ static void primary_if_update_addr(struct bat_priv *bat_priv)
 	memcpy(vis_packet->sender_orig,
 	       primary_if->net_dev->dev_addr, ETH_ALEN);
 
+	bla_update_orig_address(bat_priv, primary_if, oldif);
 out:
 	if (primary_if)
 		hardif_free_ref(primary_if);
@@ -140,14 +143,15 @@ static void primary_if_select(struct bat_priv *bat_priv,
 	curr_hard_iface = rcu_dereference_protected(bat_priv->primary_if, 1);
 	rcu_assign_pointer(bat_priv->primary_if, new_hard_iface);
 
-	if (curr_hard_iface)
-		hardif_free_ref(curr_hard_iface);
-
 	if (!new_hard_iface)
-		return;
+		goto out;
 
 	bat_priv->bat_algo_ops->bat_ogm_init_primary(new_hard_iface);
-	primary_if_update_addr(bat_priv);
+	primary_if_update_addr(bat_priv, curr_hard_iface);
+
+out:
+	if (curr_hard_iface)
+		hardif_free_ref(curr_hard_iface);
 }
 
 static bool hardif_is_iface_up(const struct hard_iface *hard_iface)
@@ -531,7 +535,7 @@ static int hard_if_event(struct notifier_block *this,
 			goto hardif_put;
 
 		if (hard_iface == primary_if)
-			primary_if_update_addr(bat_priv);
+			primary_if_update_addr(bat_priv, NULL);
 		break;
 	default:
 		break;
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 94d4968a953a..e67ca96285b3 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -30,6 +30,7 @@
 #include "translation-table.h"
 #include "hard-interface.h"
 #include "gateway_client.h"
+#include "bridge_loop_avoidance.h"
 #include "vis.h"
 #include "hash.h"
 #include "bat_algo.h"
@@ -115,6 +116,9 @@ int mesh_init(struct net_device *soft_iface)
 	if (vis_init(bat_priv) < 1)
 		goto err;
 
+	if (bla_init(bat_priv) < 1)
+		goto err;
+
 	atomic_set(&bat_priv->gw_reselect, 0);
 	atomic_set(&bat_priv->mesh_state, MESH_ACTIVE);
 	goto end;
@@ -142,6 +146,8 @@ void mesh_free(struct net_device *soft_iface)
 
 	tt_free(bat_priv);
 
+	bla_free(bat_priv);
+
 	atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
 }
 
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 7a6a25f22fc2..82723b5dce61 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -80,6 +80,9 @@
 #define MAX_AGGREGATION_BYTES 512
 #define MAX_AGGREGATION_MS 100
 
+#define BLA_PERIOD_LENGTH 10000 /* 10 seconds */
+#define BLA_BACKBONE_TIMEOUT (BLA_PERIOD_LENGTH * 3)
+#define BLA_CLAIM_TIMEOUT (BLA_PERIOD_LENGTH * 10)
 /* don't reset again within 30 seconds */
 #define RESET_PROTECTION_MS 30000
 #define EXPECTED_SEQNO_RANGE 65536
@@ -117,7 +120,8 @@ enum dbg_level {
 	DBG_BATMAN = 1 << 0,
 	DBG_ROUTES = 1 << 1, /* route added / changed / deleted */
 	DBG_TT = 1 << 2, /* translation table operations */
-	DBG_ALL = 7
+	DBG_BLA = 1 << 3, /* bridge loop avoidance */
+	DBG_ALL = 15
 };
 
 /* Kernel headers */
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 82390818874b..ce4969885894 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -28,6 +28,7 @@
 #include "hard-interface.h"
 #include "unicast.h"
 #include "soft-interface.h"
+#include "bridge_loop_avoidance.h"
 
 static void purge_orig(struct work_struct *work);
 
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index 45e75ed6637d..59800e82371a 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -90,6 +90,23 @@ enum tt_client_flags {
 	TT_CLIENT_PENDING = 1 << 10
 };
 
+/* claim frame types for the bridge loop avoidance */
+enum bla_claimframe {
+	CLAIM_TYPE_ADD = 0x00,
+	CLAIM_TYPE_DEL = 0x01,
+	CLAIM_TYPE_ANNOUNCE = 0x02,
+	CLAIM_TYPE_REQUEST = 0x03
+};
+
+/* the destination hardware field in the ARP frame is used to
+ * transport the claim type and the group id
+ */
+struct bla_claim_dst {
+	uint8_t magic[3];	/* FF:43:05 */
+	uint8_t type;		/* bla_claimframe */
+	uint16_t group;		/* group id */
+} __packed;
+
 struct batman_header {
 	uint8_t packet_type;
 	uint8_t version;  /* batman version field */
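The new struct is meant to overlay the six-byte target hardware address of the ARP payload exactly, which is why it is marked __packed: three magic bytes, one type byte and a two-byte group id add up to ETH_ALEN. A compile-time check in that spirit, with static_assert as a userspace stand-in for the kernel's BUILD_BUG_ON (illustration only, not part of the diff):

#include <assert.h>
#include <stdint.h>

#define ETH_ALEN 6

/* mirrors the struct added to packet.h above */
struct bla_claim_dst {
	uint8_t magic[3];	/* FF:43:05 */
	uint8_t type;		/* bla_claimframe */
	uint16_t group;		/* group id */
} __attribute__((packed));

/* the whole point of packing the struct: it must cover the 6-byte
 * hardware address field of the ARP payload with no padding at all
 */
static_assert(sizeof(struct bla_claim_dst) == ETH_ALEN,
	      "bla_claim_dst must be exactly one MAC address long");
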
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 71d4211beb23..a1d8c9b0f902 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -29,6 +29,7 @@
 #include "originator.h"
 #include "vis.h"
 #include "unicast.h"
+#include "bridge_loop_avoidance.h"
 
 static int route_unicast_packet(struct sk_buff *skb,
 				struct hard_iface *recv_if);
@@ -1071,6 +1072,12 @@ int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
 	/* rebroadcast packet */
 	add_bcast_packet_to_list(bat_priv, skb, 1);
 
+	/* don't hand the broadcast up if it is from an originator
+	 * from the same backbone.
+	 */
+	if (bla_is_backbone_gw(skb, orig_node, hdr_size))
+		goto out;
+
 	/* broadcast for me */
 	interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
 	ret = NET_RX_SUCCESS;
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index e56cb88ef2ba..4d639b303bd7 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -36,6 +36,7 @@
 #include <linux/etherdevice.h>
 #include <linux/if_vlan.h>
 #include "unicast.h"
+#include "bridge_loop_avoidance.h"
 
 
 static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
@@ -152,6 +153,9 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
 		goto dropped;
 	}
 
+	if (bla_tx(bat_priv, skb, vid))
+		goto dropped;
+
 	/* Register the client MAC in the transtable */
 	tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
 
@@ -287,6 +291,12 @@ void interface_rx(struct net_device *soft_iface,
 	if (is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest))
 		goto dropped;
 
+	/* Let the bridge loop avoidance check the packet. If it will
+	 * not handle it, we can safely push it up.
+	 */
+	if (bla_rx(bat_priv, skb, vid))
+		goto out;
+
 	netif_rx(skb);
 	goto out;
 
@@ -354,6 +364,7 @@ struct net_device *softif_create(const char *name)
 
 	atomic_set(&bat_priv->aggregated_ogms, 1);
 	atomic_set(&bat_priv->bonding, 0);
+	atomic_set(&bat_priv->bridge_loop_avoidance, 0);
 	atomic_set(&bat_priv->ap_isolation, 0);
 	atomic_set(&bat_priv->vis_mode, VIS_TYPE_CLIENT_UPDATE);
 	atomic_set(&bat_priv->gw_mode, GW_MODE_OFF);
@@ -371,6 +382,7 @@ struct net_device *softif_create(const char *name)
 	atomic_set(&bat_priv->ttvn, 0);
 	atomic_set(&bat_priv->tt_local_changes, 0);
 	atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
+	atomic_set(&bat_priv->bla_num_requests, 0);
 
 	bat_priv->tt_buff = NULL;
 	bat_priv->tt_buff_len = 0;
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index feac2f4030c8..089dd44a29b1 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -148,6 +148,7 @@ struct bat_priv {
 	atomic_t bonding;		/* boolean */
 	atomic_t fragmentation;		/* boolean */
 	atomic_t ap_isolation;		/* boolean */
+	atomic_t bridge_loop_avoidance;	/* boolean */
 	atomic_t vis_mode;		/* VIS_TYPE_* */
 	atomic_t gw_mode;		/* GW_MODE_* */
 	atomic_t gw_sel_class;		/* uint */
@@ -161,6 +162,7 @@ struct bat_priv {
 	atomic_t ttvn; /* translation table version number */
 	atomic_t tt_ogm_append_cnt;
 	atomic_t tt_local_changes; /* changes registered in a OGM interval */
+	atomic_t bla_num_requests; /* number of bla requests in flight */
 	/* The tt_poss_change flag is used to detect an ongoing roaming phase.
 	 * If true, then I received a Roaming_adv and I have to inspect every
 	 * packet directed to me to check whether I am still the true
@@ -179,6 +181,8 @@ struct bat_priv {
 	struct hashtable_t *orig_hash;
 	struct hashtable_t *tt_local_hash;
 	struct hashtable_t *tt_global_hash;
+	struct hashtable_t *claim_hash;
+	struct hashtable_t *backbone_hash;
 	struct list_head tt_req_list; /* list of pending tt_requests */
 	struct list_head tt_roam_list;
 	struct hashtable_t *vis_hash;
@@ -199,6 +203,7 @@ struct bat_priv {
 	struct delayed_work tt_work;
 	struct delayed_work orig_work;
 	struct delayed_work vis_work;
+	struct delayed_work bla_work;
 	struct gw_node __rcu *curr_gw; /* rcu protected pointer */
 	atomic_t gw_reselect;
 	struct hard_iface __rcu *primary_if; /* rcu protected pointer */
@@ -241,6 +246,28 @@ struct tt_global_entry {
 	unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */
 };
 
+struct backbone_gw {
+	uint8_t orig[ETH_ALEN];
+	short vid;		/* used VLAN ID */
+	struct hlist_node hash_entry;
+	struct bat_priv *bat_priv;
+	unsigned long lasttime;	/* last time we heard of this backbone gw */
+	atomic_t request_sent;
+	atomic_t refcount;
+	struct rcu_head rcu;
+	uint16_t crc;		/* crc checksum over all claims */
+};
+
+struct claim {
+	uint8_t addr[ETH_ALEN];
+	short vid;
+	struct backbone_gw *backbone_gw;
+	unsigned long lasttime;	/* last time we heard of claim (locals only) */
+	struct rcu_head rcu;
+	atomic_t refcount;
+	struct hlist_node hash_entry;
+};
+
 struct tt_change_node {
 	struct list_head list;
 	struct tt_change change;
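Both new structures combine an atomic_t refcount with an rcu_head, and every lookup in bridge_loop_avoidance.c follows the same discipline: walk the hash bucket under rcu_read_lock() and only return an entry whose refcount could still be raised from a non-zero value, so objects already queued for kfree_rcu()/call_rcu() are never handed out again. A userspace analogue of that "increment if not zero" step, using C11 atomics purely for illustration and not part of the diff:

#include <stdatomic.h>
#include <stdbool.h>

/* the core of the lookup pattern shared by claim_hash_find() and
 * backbone_hash_find(): take a reference only if the object is not
 * already on its way to being freed
 */
static bool refcount_inc_not_zero(atomic_int *refcount)
{
	int old = atomic_load(refcount);

	while (old != 0) {
		/* on failure, old is reloaded with the current value */
		if (atomic_compare_exchange_weak(refcount, &old, old + 1))
			return true;
	}
	return false;
}
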