aboutsummaryrefslogtreecommitdiffstats
path: root/net/batman-adv
diff options
context:
space:
mode:
Diffstat (limited to 'net/batman-adv')
-rw-r--r--net/batman-adv/Kconfig1
-rw-r--r--net/batman-adv/aggregation.c73
-rw-r--r--net/batman-adv/aggregation.h13
-rw-r--r--net/batman-adv/bat_debugfs.c11
-rw-r--r--net/batman-adv/bat_sysfs.c102
-rw-r--r--net/batman-adv/bat_sysfs.h2
-rw-r--r--net/batman-adv/bitarray.c12
-rw-r--r--net/batman-adv/bitarray.h10
-rw-r--r--net/batman-adv/gateway_client.c268
-rw-r--r--net/batman-adv/gateway_client.h3
-rw-r--r--net/batman-adv/gateway_common.c25
-rw-r--r--net/batman-adv/hard-interface.c46
-rw-r--r--net/batman-adv/hard-interface.h20
-rw-r--r--net/batman-adv/hash.c7
-rw-r--r--net/batman-adv/hash.h6
-rw-r--r--net/batman-adv/icmp_socket.c4
-rw-r--r--net/batman-adv/main.c31
-rw-r--r--net/batman-adv/main.h85
-rw-r--r--net/batman-adv/originator.c36
-rw-r--r--net/batman-adv/originator.h18
-rw-r--r--net/batman-adv/packet.h142
-rw-r--r--net/batman-adv/ring_buffer.c4
-rw-r--r--net/batman-adv/ring_buffer.h2
-rw-r--r--net/batman-adv/routing.c376
-rw-r--r--net/batman-adv/routing.h15
-rw-r--r--net/batman-adv/send.c147
-rw-r--r--net/batman-adv/send.h14
-rw-r--r--net/batman-adv/soft-interface.c72
-rw-r--r--net/batman-adv/soft-interface.h5
-rw-r--r--net/batman-adv/translation-table.c1584
-rw-r--r--net/batman-adv/translation-table.h49
-rw-r--r--net/batman-adv/types.h76
-rw-r--r--net/batman-adv/unicast.c33
-rw-r--r--net/batman-adv/unicast.h8
-rw-r--r--net/batman-adv/vis.c104
35 files changed, 2593 insertions, 811 deletions
diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig
index 6c051ad833e..2b68d068eaf 100644
--- a/net/batman-adv/Kconfig
+++ b/net/batman-adv/Kconfig
@@ -5,6 +5,7 @@
5config BATMAN_ADV 5config BATMAN_ADV
6 tristate "B.A.T.M.A.N. Advanced Meshing Protocol" 6 tristate "B.A.T.M.A.N. Advanced Meshing Protocol"
7 depends on NET 7 depends on NET
8 select CRC16
8 default n 9 default n
9 ---help--- 10 ---help---
10 11
diff --git a/net/batman-adv/aggregation.c b/net/batman-adv/aggregation.c
index a8c32030527..69467fe71ff 100644
--- a/net/batman-adv/aggregation.c
+++ b/net/batman-adv/aggregation.c
@@ -20,28 +20,26 @@
20 */ 20 */
21 21
22#include "main.h" 22#include "main.h"
23#include "translation-table.h"
23#include "aggregation.h" 24#include "aggregation.h"
24#include "send.h" 25#include "send.h"
25#include "routing.h" 26#include "routing.h"
26#include "hard-interface.h" 27#include "hard-interface.h"
27 28
28/* calculate the size of the tt information for a given packet */
29static int tt_len(struct batman_packet *batman_packet)
30{
31 return batman_packet->num_tt * ETH_ALEN;
32}
33
34/* return true if new_packet can be aggregated with forw_packet */ 29/* return true if new_packet can be aggregated with forw_packet */
35static bool can_aggregate_with(struct batman_packet *new_batman_packet, 30static bool can_aggregate_with(const struct batman_packet *new_batman_packet,
31 struct bat_priv *bat_priv,
36 int packet_len, 32 int packet_len,
37 unsigned long send_time, 33 unsigned long send_time,
38 bool directlink, 34 bool directlink,
39 struct hard_iface *if_incoming, 35 const struct hard_iface *if_incoming,
40 struct forw_packet *forw_packet) 36 const struct forw_packet *forw_packet)
41{ 37{
42 struct batman_packet *batman_packet = 38 struct batman_packet *batman_packet =
43 (struct batman_packet *)forw_packet->skb->data; 39 (struct batman_packet *)forw_packet->skb->data;
44 int aggregated_bytes = forw_packet->packet_len + packet_len; 40 int aggregated_bytes = forw_packet->packet_len + packet_len;
41 struct hard_iface *primary_if = NULL;
42 bool res = false;
45 43
46 /** 44 /**
47 * we can aggregate the current packet to this aggregated packet 45 * we can aggregate the current packet to this aggregated packet
@@ -66,6 +64,10 @@ static bool can_aggregate_with(struct batman_packet *new_batman_packet,
66 * packet 64 * packet
67 */ 65 */
68 66
67 primary_if = primary_if_get_selected(bat_priv);
68 if (!primary_if)
69 goto out;
70
69 /* packets without direct link flag and high TTL 71 /* packets without direct link flag and high TTL
70 * are flooded through the net */ 72 * are flooded through the net */
71 if ((!directlink) && 73 if ((!directlink) &&
@@ -75,8 +77,10 @@ static bool can_aggregate_with(struct batman_packet *new_batman_packet,
75 /* own packets originating non-primary 77 /* own packets originating non-primary
76 * interfaces leave only that interface */ 78 * interfaces leave only that interface */
77 ((!forw_packet->own) || 79 ((!forw_packet->own) ||
78 (forw_packet->if_incoming->if_num == 0))) 80 (forw_packet->if_incoming == primary_if))) {
79 return true; 81 res = true;
82 goto out;
83 }
80 84
81 /* if the incoming packet is sent via this one 85 /* if the incoming packet is sent via this one
82 * interface only - we still can aggregate */ 86 * interface only - we still can aggregate */
@@ -89,16 +93,22 @@ static bool can_aggregate_with(struct batman_packet *new_batman_packet,
89 * (= secondary interface packets in general) */ 93 * (= secondary interface packets in general) */
90 (batman_packet->flags & DIRECTLINK || 94 (batman_packet->flags & DIRECTLINK ||
91 (forw_packet->own && 95 (forw_packet->own &&
92 forw_packet->if_incoming->if_num != 0))) 96 forw_packet->if_incoming != primary_if))) {
93 return true; 97 res = true;
98 goto out;
99 }
94 } 100 }
95 101
96 return false; 102out:
103 if (primary_if)
104 hardif_free_ref(primary_if);
105 return res;
97} 106}
98 107
99/* create a new aggregated packet and add this packet to it */ 108/* create a new aggregated packet and add this packet to it */
100static void new_aggregated_packet(unsigned char *packet_buff, int packet_len, 109static void new_aggregated_packet(const unsigned char *packet_buff,
101 unsigned long send_time, bool direct_link, 110 int packet_len, unsigned long send_time,
111 bool direct_link,
102 struct hard_iface *if_incoming, 112 struct hard_iface *if_incoming,
103 int own_packet) 113 int own_packet)
104{ 114{
@@ -118,7 +128,7 @@ static void new_aggregated_packet(unsigned char *packet_buff, int packet_len,
118 } 128 }
119 } 129 }
120 130
121 forw_packet_aggr = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC); 131 forw_packet_aggr = kmalloc(sizeof(*forw_packet_aggr), GFP_ATOMIC);
122 if (!forw_packet_aggr) { 132 if (!forw_packet_aggr) {
123 if (!own_packet) 133 if (!own_packet)
124 atomic_inc(&bat_priv->batman_queue_left); 134 atomic_inc(&bat_priv->batman_queue_left);
@@ -150,7 +160,7 @@ static void new_aggregated_packet(unsigned char *packet_buff, int packet_len,
150 forw_packet_aggr->own = own_packet; 160 forw_packet_aggr->own = own_packet;
151 forw_packet_aggr->if_incoming = if_incoming; 161 forw_packet_aggr->if_incoming = if_incoming;
152 forw_packet_aggr->num_packets = 0; 162 forw_packet_aggr->num_packets = 0;
153 forw_packet_aggr->direct_link_flags = 0; 163 forw_packet_aggr->direct_link_flags = NO_FLAGS;
154 forw_packet_aggr->send_time = send_time; 164 forw_packet_aggr->send_time = send_time;
155 165
156 /* save packet direct link flag status */ 166 /* save packet direct link flag status */
@@ -176,8 +186,7 @@ out:
176 186
177/* aggregate a new packet into the existing aggregation */ 187/* aggregate a new packet into the existing aggregation */
178static void aggregate(struct forw_packet *forw_packet_aggr, 188static void aggregate(struct forw_packet *forw_packet_aggr,
179 unsigned char *packet_buff, 189 const unsigned char *packet_buff, int packet_len,
180 int packet_len,
181 bool direct_link) 190 bool direct_link)
182{ 191{
183 unsigned char *skb_buff; 192 unsigned char *skb_buff;
@@ -195,7 +204,7 @@ static void aggregate(struct forw_packet *forw_packet_aggr,
195 204
196void add_bat_packet_to_list(struct bat_priv *bat_priv, 205void add_bat_packet_to_list(struct bat_priv *bat_priv,
197 unsigned char *packet_buff, int packet_len, 206 unsigned char *packet_buff, int packet_len,
198 struct hard_iface *if_incoming, char own_packet, 207 struct hard_iface *if_incoming, int own_packet,
199 unsigned long send_time) 208 unsigned long send_time)
200{ 209{
201 /** 210 /**
@@ -215,6 +224,7 @@ void add_bat_packet_to_list(struct bat_priv *bat_priv,
215 hlist_for_each_entry(forw_packet_pos, tmp_node, 224 hlist_for_each_entry(forw_packet_pos, tmp_node,
216 &bat_priv->forw_bat_list, list) { 225 &bat_priv->forw_bat_list, list) {
217 if (can_aggregate_with(batman_packet, 226 if (can_aggregate_with(batman_packet,
227 bat_priv,
218 packet_len, 228 packet_len,
219 send_time, 229 send_time,
220 direct_link, 230 direct_link,
@@ -253,8 +263,9 @@ void add_bat_packet_to_list(struct bat_priv *bat_priv,
253} 263}
254 264
255/* unpack the aggregated packets and process them one by one */ 265/* unpack the aggregated packets and process them one by one */
256void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff, 266void receive_aggr_bat_packet(const struct ethhdr *ethhdr,
257 int packet_len, struct hard_iface *if_incoming) 267 unsigned char *packet_buff, int packet_len,
268 struct hard_iface *if_incoming)
258{ 269{
259 struct batman_packet *batman_packet; 270 struct batman_packet *batman_packet;
260 int buff_pos = 0; 271 int buff_pos = 0;
@@ -263,18 +274,20 @@ void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff,
263 batman_packet = (struct batman_packet *)packet_buff; 274 batman_packet = (struct batman_packet *)packet_buff;
264 275
265 do { 276 do {
266 /* network to host order for our 32bit seqno, and the 277 /* network to host order for our 32bit seqno and the
267 orig_interval. */ 278 orig_interval */
268 batman_packet->seqno = ntohl(batman_packet->seqno); 279 batman_packet->seqno = ntohl(batman_packet->seqno);
280 batman_packet->tt_crc = ntohs(batman_packet->tt_crc);
269 281
270 tt_buff = packet_buff + buff_pos + BAT_PACKET_LEN; 282 tt_buff = packet_buff + buff_pos + BAT_PACKET_LEN;
271 receive_bat_packet(ethhdr, batman_packet,
272 tt_buff, tt_len(batman_packet),
273 if_incoming);
274 283
275 buff_pos += BAT_PACKET_LEN + tt_len(batman_packet); 284 receive_bat_packet(ethhdr, batman_packet, tt_buff, if_incoming);
285
286 buff_pos += BAT_PACKET_LEN +
287 tt_len(batman_packet->tt_num_changes);
288
276 batman_packet = (struct batman_packet *) 289 batman_packet = (struct batman_packet *)
277 (packet_buff + buff_pos); 290 (packet_buff + buff_pos);
278 } while (aggregated_packet(buff_pos, packet_len, 291 } while (aggregated_packet(buff_pos, packet_len,
279 batman_packet->num_tt)); 292 batman_packet->tt_num_changes));
280} 293}
diff --git a/net/batman-adv/aggregation.h b/net/batman-adv/aggregation.h
index 7e6d72fbf54..216337bb841 100644
--- a/net/batman-adv/aggregation.h
+++ b/net/batman-adv/aggregation.h
@@ -25,9 +25,11 @@
25#include "main.h" 25#include "main.h"
26 26
27/* is there another aggregated packet here? */ 27/* is there another aggregated packet here? */
28static inline int aggregated_packet(int buff_pos, int packet_len, int num_tt) 28static inline int aggregated_packet(int buff_pos, int packet_len,
29 int tt_num_changes)
29{ 30{
30 int next_buff_pos = buff_pos + BAT_PACKET_LEN + (num_tt * ETH_ALEN); 31 int next_buff_pos = buff_pos + BAT_PACKET_LEN + (tt_num_changes *
32 sizeof(struct tt_change));
31 33
32 return (next_buff_pos <= packet_len) && 34 return (next_buff_pos <= packet_len) &&
33 (next_buff_pos <= MAX_AGGREGATION_BYTES); 35 (next_buff_pos <= MAX_AGGREGATION_BYTES);
@@ -35,9 +37,10 @@ static inline int aggregated_packet(int buff_pos, int packet_len, int num_tt)
35 37
36void add_bat_packet_to_list(struct bat_priv *bat_priv, 38void add_bat_packet_to_list(struct bat_priv *bat_priv,
37 unsigned char *packet_buff, int packet_len, 39 unsigned char *packet_buff, int packet_len,
38 struct hard_iface *if_incoming, char own_packet, 40 struct hard_iface *if_incoming, int own_packet,
39 unsigned long send_time); 41 unsigned long send_time);
40void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff, 42void receive_aggr_bat_packet(const struct ethhdr *ethhdr,
41 int packet_len, struct hard_iface *if_incoming); 43 unsigned char *packet_buff, int packet_len,
44 struct hard_iface *if_incoming);
42 45
43#endif /* _NET_BATMAN_ADV_AGGREGATION_H_ */ 46#endif /* _NET_BATMAN_ADV_AGGREGATION_H_ */
diff --git a/net/batman-adv/bat_debugfs.c b/net/batman-adv/bat_debugfs.c
index abaeec5f624..d0af9bf69e4 100644
--- a/net/batman-adv/bat_debugfs.c
+++ b/net/batman-adv/bat_debugfs.c
@@ -50,7 +50,8 @@ static void emit_log_char(struct debug_log *debug_log, char c)
50 debug_log->log_start = debug_log->log_end - log_buff_len; 50 debug_log->log_start = debug_log->log_end - log_buff_len;
51} 51}
52 52
53static int fdebug_log(struct debug_log *debug_log, char *fmt, ...) 53__printf(2, 3)
54static int fdebug_log(struct debug_log *debug_log, const char *fmt, ...)
54{ 55{
55 va_list args; 56 va_list args;
56 static char debug_log_buf[256]; 57 static char debug_log_buf[256];
@@ -74,14 +75,14 @@ static int fdebug_log(struct debug_log *debug_log, char *fmt, ...)
74 return 0; 75 return 0;
75} 76}
76 77
77int debug_log(struct bat_priv *bat_priv, char *fmt, ...) 78int debug_log(struct bat_priv *bat_priv, const char *fmt, ...)
78{ 79{
79 va_list args; 80 va_list args;
80 char tmp_log_buf[256]; 81 char tmp_log_buf[256];
81 82
82 va_start(args, fmt); 83 va_start(args, fmt);
83 vscnprintf(tmp_log_buf, sizeof(tmp_log_buf), fmt, args); 84 vscnprintf(tmp_log_buf, sizeof(tmp_log_buf), fmt, args);
84 fdebug_log(bat_priv->debug_log, "[%10u] %s", 85 fdebug_log(bat_priv->debug_log, "[%10lu] %s",
85 (jiffies / HZ), tmp_log_buf); 86 (jiffies / HZ), tmp_log_buf);
86 va_end(args); 87 va_end(args);
87 88
@@ -114,7 +115,7 @@ static ssize_t log_read(struct file *file, char __user *buf,
114 !(debug_log->log_end - debug_log->log_start)) 115 !(debug_log->log_end - debug_log->log_start))
115 return -EAGAIN; 116 return -EAGAIN;
116 117
117 if ((!buf) || (count < 0)) 118 if (!buf)
118 return -EINVAL; 119 return -EINVAL;
119 120
120 if (count == 0) 121 if (count == 0)
@@ -184,7 +185,7 @@ static int debug_log_setup(struct bat_priv *bat_priv)
184 if (!bat_priv->debug_dir) 185 if (!bat_priv->debug_dir)
185 goto err; 186 goto err;
186 187
187 bat_priv->debug_log = kzalloc(sizeof(struct debug_log), GFP_ATOMIC); 188 bat_priv->debug_log = kzalloc(sizeof(*bat_priv->debug_log), GFP_ATOMIC);
188 if (!bat_priv->debug_log) 189 if (!bat_priv->debug_log)
189 goto err; 190 goto err;
190 191
diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c
index 497a0700cc3..cd15deba60a 100644
--- a/net/batman-adv/bat_sysfs.c
+++ b/net/batman-adv/bat_sysfs.c
@@ -28,9 +28,31 @@
28#include "gateway_client.h" 28#include "gateway_client.h"
29#include "vis.h" 29#include "vis.h"
30 30
31#define to_dev(obj) container_of(obj, struct device, kobj) 31static struct net_device *kobj_to_netdev(struct kobject *obj)
32#define kobj_to_netdev(obj) to_net_dev(to_dev(obj->parent)) 32{
33#define kobj_to_batpriv(obj) netdev_priv(kobj_to_netdev(obj)) 33 struct device *dev = container_of(obj->parent, struct device, kobj);
34 return to_net_dev(dev);
35}
36
37static struct bat_priv *kobj_to_batpriv(struct kobject *obj)
38{
39 struct net_device *net_dev = kobj_to_netdev(obj);
40 return netdev_priv(net_dev);
41}
42
43#define UEV_TYPE_VAR "BATTYPE="
44#define UEV_ACTION_VAR "BATACTION="
45#define UEV_DATA_VAR "BATDATA="
46
47static char *uev_action_str[] = {
48 "add",
49 "del",
50 "change"
51};
52
53static char *uev_type_str[] = {
54 "gw"
55};
34 56
35/* Use this, if you have customized show and store functions */ 57/* Use this, if you have customized show and store functions */
36#define BAT_ATTR(_name, _mode, _show, _store) \ 58#define BAT_ATTR(_name, _mode, _show, _store) \
@@ -96,7 +118,7 @@ ssize_t show_##_name(struct kobject *kobj, struct attribute *attr, \
96 118
97static int store_bool_attr(char *buff, size_t count, 119static int store_bool_attr(char *buff, size_t count,
98 struct net_device *net_dev, 120 struct net_device *net_dev,
99 char *attr_name, atomic_t *attr) 121 const char *attr_name, atomic_t *attr)
100{ 122{
101 int enabled = -1; 123 int enabled = -1;
102 124
@@ -138,16 +160,15 @@ static inline ssize_t __store_bool_attr(char *buff, size_t count,
138{ 160{
139 int ret; 161 int ret;
140 162
141 ret = store_bool_attr(buff, count, net_dev, (char *)attr->name, 163 ret = store_bool_attr(buff, count, net_dev, attr->name, attr_store);
142 attr_store);
143 if (post_func && ret) 164 if (post_func && ret)
144 post_func(net_dev); 165 post_func(net_dev);
145 166
146 return ret; 167 return ret;
147} 168}
148 169
149static int store_uint_attr(char *buff, size_t count, 170static int store_uint_attr(const char *buff, size_t count,
150 struct net_device *net_dev, char *attr_name, 171 struct net_device *net_dev, const char *attr_name,
151 unsigned int min, unsigned int max, atomic_t *attr) 172 unsigned int min, unsigned int max, atomic_t *attr)
152{ 173{
153 unsigned long uint_val; 174 unsigned long uint_val;
@@ -183,15 +204,15 @@ static int store_uint_attr(char *buff, size_t count,
183 return count; 204 return count;
184} 205}
185 206
186static inline ssize_t __store_uint_attr(char *buff, size_t count, 207static inline ssize_t __store_uint_attr(const char *buff, size_t count,
187 int min, int max, 208 int min, int max,
188 void (*post_func)(struct net_device *), 209 void (*post_func)(struct net_device *),
189 struct attribute *attr, 210 const struct attribute *attr,
190 atomic_t *attr_store, struct net_device *net_dev) 211 atomic_t *attr_store, struct net_device *net_dev)
191{ 212{
192 int ret; 213 int ret;
193 214
194 ret = store_uint_attr(buff, count, net_dev, (char *)attr->name, 215 ret = store_uint_attr(buff, count, net_dev, attr->name,
195 min, max, attr_store); 216 min, max, attr_store);
196 if (post_func && ret) 217 if (post_func && ret)
197 post_func(net_dev); 218 post_func(net_dev);
@@ -368,7 +389,7 @@ BAT_ATTR_UINT(gw_sel_class, S_IRUGO | S_IWUSR, 1, TQ_MAX_VALUE,
368static BAT_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, show_gw_bwidth, 389static BAT_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, show_gw_bwidth,
369 store_gw_bwidth); 390 store_gw_bwidth);
370#ifdef CONFIG_BATMAN_ADV_DEBUG 391#ifdef CONFIG_BATMAN_ADV_DEBUG
371BAT_ATTR_UINT(log_level, S_IRUGO | S_IWUSR, 0, 3, NULL); 392BAT_ATTR_UINT(log_level, S_IRUGO | S_IWUSR, 0, 7, NULL);
372#endif 393#endif
373 394
374static struct bat_attribute *mesh_attrs[] = { 395static struct bat_attribute *mesh_attrs[] = {
@@ -594,3 +615,60 @@ void sysfs_del_hardif(struct kobject **hardif_obj)
594 kobject_put(*hardif_obj); 615 kobject_put(*hardif_obj);
595 *hardif_obj = NULL; 616 *hardif_obj = NULL;
596} 617}
618
619int throw_uevent(struct bat_priv *bat_priv, enum uev_type type,
620 enum uev_action action, const char *data)
621{
622 int ret = -1;
623 struct hard_iface *primary_if = NULL;
624 struct kobject *bat_kobj;
625 char *uevent_env[4] = { NULL, NULL, NULL, NULL };
626
627 primary_if = primary_if_get_selected(bat_priv);
628 if (!primary_if)
629 goto out;
630
631 bat_kobj = &primary_if->soft_iface->dev.kobj;
632
633 uevent_env[0] = kmalloc(strlen(UEV_TYPE_VAR) +
634 strlen(uev_type_str[type]) + 1,
635 GFP_ATOMIC);
636 if (!uevent_env[0])
637 goto out;
638
639 sprintf(uevent_env[0], "%s%s", UEV_TYPE_VAR, uev_type_str[type]);
640
641 uevent_env[1] = kmalloc(strlen(UEV_ACTION_VAR) +
642 strlen(uev_action_str[action]) + 1,
643 GFP_ATOMIC);
644 if (!uevent_env[1])
645 goto out;
646
647 sprintf(uevent_env[1], "%s%s", UEV_ACTION_VAR, uev_action_str[action]);
648
649 /* If the event is DEL, ignore the data field */
650 if (action != UEV_DEL) {
651 uevent_env[2] = kmalloc(strlen(UEV_DATA_VAR) +
652 strlen(data) + 1, GFP_ATOMIC);
653 if (!uevent_env[2])
654 goto out;
655
656 sprintf(uevent_env[2], "%s%s", UEV_DATA_VAR, data);
657 }
658
659 ret = kobject_uevent_env(bat_kobj, KOBJ_CHANGE, uevent_env);
660out:
661 kfree(uevent_env[0]);
662 kfree(uevent_env[1]);
663 kfree(uevent_env[2]);
664
665 if (primary_if)
666 hardif_free_ref(primary_if);
667
668 if (ret)
669 bat_dbg(DBG_BATMAN, bat_priv, "Impossible to send "
670 "uevent for (%s,%s,%s) event (err: %d)\n",
671 uev_type_str[type], uev_action_str[action],
672 (action == UEV_DEL ? "NULL" : data), ret);
673 return ret;
674}
diff --git a/net/batman-adv/bat_sysfs.h b/net/batman-adv/bat_sysfs.h
index 02f1fa7aadf..a3f75a723c5 100644
--- a/net/batman-adv/bat_sysfs.h
+++ b/net/batman-adv/bat_sysfs.h
@@ -38,5 +38,7 @@ int sysfs_add_meshif(struct net_device *dev);
38void sysfs_del_meshif(struct net_device *dev); 38void sysfs_del_meshif(struct net_device *dev);
39int sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev); 39int sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev);
40void sysfs_del_hardif(struct kobject **hardif_obj); 40void sysfs_del_hardif(struct kobject **hardif_obj);
41int throw_uevent(struct bat_priv *bat_priv, enum uev_type type,
42 enum uev_action action, const char *data);
41 43
42#endif /* _NET_BATMAN_ADV_SYSFS_H_ */ 44#endif /* _NET_BATMAN_ADV_SYSFS_H_ */
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
index ad2ca925b3e..c1f4bfc09cc 100644
--- a/net/batman-adv/bitarray.c
+++ b/net/batman-adv/bitarray.c
@@ -26,8 +26,8 @@
26 26
27/* returns true if the corresponding bit in the given seq_bits indicates true 27/* returns true if the corresponding bit in the given seq_bits indicates true
28 * and curr_seqno is within range of last_seqno */ 28 * and curr_seqno is within range of last_seqno */
29uint8_t get_bit_status(unsigned long *seq_bits, uint32_t last_seqno, 29int get_bit_status(const unsigned long *seq_bits, uint32_t last_seqno,
30 uint32_t curr_seqno) 30 uint32_t curr_seqno)
31{ 31{
32 int32_t diff, word_offset, word_num; 32 int32_t diff, word_offset, word_num;
33 33
@@ -127,10 +127,10 @@ static void bit_reset_window(unsigned long *seq_bits)
127 * 1 if the window was moved (either new or very old) 127 * 1 if the window was moved (either new or very old)
128 * 0 if the window was not moved/shifted. 128 * 0 if the window was not moved/shifted.
129 */ 129 */
130char bit_get_packet(void *priv, unsigned long *seq_bits, 130int bit_get_packet(void *priv, unsigned long *seq_bits,
131 int32_t seq_num_diff, int8_t set_mark) 131 int32_t seq_num_diff, int set_mark)
132{ 132{
133 struct bat_priv *bat_priv = (struct bat_priv *)priv; 133 struct bat_priv *bat_priv = priv;
134 134
135 /* sequence number is slightly older. We already got a sequence number 135 /* sequence number is slightly older. We already got a sequence number
136 * higher than this one, so we just mark it. */ 136 * higher than this one, so we just mark it. */
@@ -190,7 +190,7 @@ char bit_get_packet(void *priv, unsigned long *seq_bits,
190/* count the hamming weight, how many good packets did we receive? just count 190/* count the hamming weight, how many good packets did we receive? just count
191 * the 1's. 191 * the 1's.
192 */ 192 */
193int bit_packet_count(unsigned long *seq_bits) 193int bit_packet_count(const unsigned long *seq_bits)
194{ 194{
195 int i, hamming = 0; 195 int i, hamming = 0;
196 196
diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h
index 769c246d1fc..9c04422aeb0 100644
--- a/net/batman-adv/bitarray.h
+++ b/net/batman-adv/bitarray.h
@@ -26,8 +26,8 @@
26 26
27/* returns true if the corresponding bit in the given seq_bits indicates true 27/* returns true if the corresponding bit in the given seq_bits indicates true
28 * and curr_seqno is within range of last_seqno */ 28 * and curr_seqno is within range of last_seqno */
29uint8_t get_bit_status(unsigned long *seq_bits, uint32_t last_seqno, 29int get_bit_status(const unsigned long *seq_bits, uint32_t last_seqno,
30 uint32_t curr_seqno); 30 uint32_t curr_seqno);
31 31
32/* turn corresponding bit on, so we can remember that we got the packet */ 32/* turn corresponding bit on, so we can remember that we got the packet */
33void bit_mark(unsigned long *seq_bits, int32_t n); 33void bit_mark(unsigned long *seq_bits, int32_t n);
@@ -35,10 +35,10 @@ void bit_mark(unsigned long *seq_bits, int32_t n);
35 35
36/* receive and process one packet, returns 1 if received seq_num is considered 36/* receive and process one packet, returns 1 if received seq_num is considered
37 * new, 0 if old */ 37 * new, 0 if old */
38char bit_get_packet(void *priv, unsigned long *seq_bits, 38int bit_get_packet(void *priv, unsigned long *seq_bits,
39 int32_t seq_num_diff, int8_t set_mark); 39 int32_t seq_num_diff, int set_mark);
40 40
41/* count the hamming weight, how many good packets did we receive? */ 41/* count the hamming weight, how many good packets did we receive? */
42int bit_packet_count(unsigned long *seq_bits); 42int bit_packet_count(const unsigned long *seq_bits);
43 43
44#endif /* _NET_BATMAN_ADV_BITARRAY_H_ */ 44#endif /* _NET_BATMAN_ADV_BITARRAY_H_ */
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 61605a0f3f3..056180ef9e1 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -20,15 +20,22 @@
20 */ 20 */
21 21
22#include "main.h" 22#include "main.h"
23#include "bat_sysfs.h"
23#include "gateway_client.h" 24#include "gateway_client.h"
24#include "gateway_common.h" 25#include "gateway_common.h"
25#include "hard-interface.h" 26#include "hard-interface.h"
26#include "originator.h" 27#include "originator.h"
28#include "routing.h"
27#include <linux/ip.h> 29#include <linux/ip.h>
28#include <linux/ipv6.h> 30#include <linux/ipv6.h>
29#include <linux/udp.h> 31#include <linux/udp.h>
30#include <linux/if_vlan.h> 32#include <linux/if_vlan.h>
31 33
34/* This is the offset of the options field in a dhcp packet starting at
35 * the beginning of the dhcp header */
36#define DHCP_OPTIONS_OFFSET 240
37#define DHCP_REQUEST 3
38
32static void gw_node_free_ref(struct gw_node *gw_node) 39static void gw_node_free_ref(struct gw_node *gw_node)
33{ 40{
34 if (atomic_dec_and_test(&gw_node->refcount)) 41 if (atomic_dec_and_test(&gw_node->refcount))
@@ -86,7 +93,7 @@ static void gw_select(struct bat_priv *bat_priv, struct gw_node *new_gw_node)
86 if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount)) 93 if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount))
87 new_gw_node = NULL; 94 new_gw_node = NULL;
88 95
89 curr_gw_node = bat_priv->curr_gw; 96 curr_gw_node = rcu_dereference_protected(bat_priv->curr_gw, 1);
90 rcu_assign_pointer(bat_priv->curr_gw, new_gw_node); 97 rcu_assign_pointer(bat_priv->curr_gw, new_gw_node);
91 98
92 if (curr_gw_node) 99 if (curr_gw_node)
@@ -97,40 +104,19 @@ static void gw_select(struct bat_priv *bat_priv, struct gw_node *new_gw_node)
97 104
98void gw_deselect(struct bat_priv *bat_priv) 105void gw_deselect(struct bat_priv *bat_priv)
99{ 106{
100 gw_select(bat_priv, NULL); 107 atomic_set(&bat_priv->gw_reselect, 1);
101} 108}
102 109
103void gw_election(struct bat_priv *bat_priv) 110static struct gw_node *gw_get_best_gw_node(struct bat_priv *bat_priv)
104{ 111{
105 struct hlist_node *node;
106 struct gw_node *gw_node, *curr_gw = NULL, *curr_gw_tmp = NULL;
107 struct neigh_node *router; 112 struct neigh_node *router;
108 uint8_t max_tq = 0; 113 struct hlist_node *node;
114 struct gw_node *gw_node, *curr_gw = NULL;
109 uint32_t max_gw_factor = 0, tmp_gw_factor = 0; 115 uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
116 uint8_t max_tq = 0;
110 int down, up; 117 int down, up;
111 118
112 /**
113 * The batman daemon checks here if we already passed a full originator
114 * cycle in order to make sure we don't choose the first gateway we
115 * hear about. This check is based on the daemon's uptime which we
116 * don't have.
117 **/
118 if (atomic_read(&bat_priv->gw_mode) != GW_MODE_CLIENT)
119 return;
120
121 curr_gw = gw_get_selected_gw_node(bat_priv);
122 if (curr_gw)
123 goto out;
124
125 rcu_read_lock(); 119 rcu_read_lock();
126 if (hlist_empty(&bat_priv->gw_list)) {
127 bat_dbg(DBG_BATMAN, bat_priv,
128 "Removing selected gateway - "
129 "no gateway in range\n");
130 gw_deselect(bat_priv);
131 goto unlock;
132 }
133
134 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) { 120 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
135 if (gw_node->deleted) 121 if (gw_node->deleted)
136 continue; 122 continue;
@@ -139,6 +125,9 @@ void gw_election(struct bat_priv *bat_priv)
139 if (!router) 125 if (!router)
140 continue; 126 continue;
141 127
128 if (!atomic_inc_not_zero(&gw_node->refcount))
129 goto next;
130
142 switch (atomic_read(&bat_priv->gw_sel_class)) { 131 switch (atomic_read(&bat_priv->gw_sel_class)) {
143 case 1: /* fast connection */ 132 case 1: /* fast connection */
144 gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, 133 gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags,
@@ -151,8 +140,12 @@ void gw_election(struct bat_priv *bat_priv)
151 140
152 if ((tmp_gw_factor > max_gw_factor) || 141 if ((tmp_gw_factor > max_gw_factor) ||
153 ((tmp_gw_factor == max_gw_factor) && 142 ((tmp_gw_factor == max_gw_factor) &&
154 (router->tq_avg > max_tq))) 143 (router->tq_avg > max_tq))) {
155 curr_gw_tmp = gw_node; 144 if (curr_gw)
145 gw_node_free_ref(curr_gw);
146 curr_gw = gw_node;
147 atomic_inc(&curr_gw->refcount);
148 }
156 break; 149 break;
157 150
158 default: /** 151 default: /**
@@ -163,8 +156,12 @@ void gw_election(struct bat_priv *bat_priv)
163 * soon as a better gateway appears which has 156 * soon as a better gateway appears which has
164 * $routing_class more tq points) 157 * $routing_class more tq points)
165 **/ 158 **/
166 if (router->tq_avg > max_tq) 159 if (router->tq_avg > max_tq) {
167 curr_gw_tmp = gw_node; 160 if (curr_gw)
161 gw_node_free_ref(curr_gw);
162 curr_gw = gw_node;
163 atomic_inc(&curr_gw->refcount);
164 }
168 break; 165 break;
169 } 166 }
170 167
@@ -174,42 +171,81 @@ void gw_election(struct bat_priv *bat_priv)
174 if (tmp_gw_factor > max_gw_factor) 171 if (tmp_gw_factor > max_gw_factor)
175 max_gw_factor = tmp_gw_factor; 172 max_gw_factor = tmp_gw_factor;
176 173
174 gw_node_free_ref(gw_node);
175
176next:
177 neigh_node_free_ref(router); 177 neigh_node_free_ref(router);
178 } 178 }
179 rcu_read_unlock();
179 180
180 if (curr_gw != curr_gw_tmp) { 181 return curr_gw;
181 router = orig_node_get_router(curr_gw_tmp->orig_node); 182}
182 if (!router)
183 goto unlock;
184 183
185 if ((curr_gw) && (!curr_gw_tmp)) 184void gw_election(struct bat_priv *bat_priv)
186 bat_dbg(DBG_BATMAN, bat_priv, 185{
187 "Removing selected gateway - " 186 struct gw_node *curr_gw = NULL, *next_gw = NULL;
188 "no gateway in range\n"); 187 struct neigh_node *router = NULL;
189 else if ((!curr_gw) && (curr_gw_tmp)) 188 char gw_addr[18] = { '\0' };
190 bat_dbg(DBG_BATMAN, bat_priv,
191 "Adding route to gateway %pM "
192 "(gw_flags: %i, tq: %i)\n",
193 curr_gw_tmp->orig_node->orig,
194 curr_gw_tmp->orig_node->gw_flags,
195 router->tq_avg);
196 else
197 bat_dbg(DBG_BATMAN, bat_priv,
198 "Changing route to gateway %pM "
199 "(gw_flags: %i, tq: %i)\n",
200 curr_gw_tmp->orig_node->orig,
201 curr_gw_tmp->orig_node->gw_flags,
202 router->tq_avg);
203 189
204 neigh_node_free_ref(router); 190 /**
205 gw_select(bat_priv, curr_gw_tmp); 191 * The batman daemon checks here if we already passed a full originator
192 * cycle in order to make sure we don't choose the first gateway we
193 * hear about. This check is based on the daemon's uptime which we
194 * don't have.
195 **/
196 if (atomic_read(&bat_priv->gw_mode) != GW_MODE_CLIENT)
197 goto out;
198
199 if (!atomic_dec_not_zero(&bat_priv->gw_reselect))
200 goto out;
201
202 curr_gw = gw_get_selected_gw_node(bat_priv);
203
204 next_gw = gw_get_best_gw_node(bat_priv);
205
206 if (curr_gw == next_gw)
207 goto out;
208
209 if (next_gw) {
210 sprintf(gw_addr, "%pM", next_gw->orig_node->orig);
211
212 router = orig_node_get_router(next_gw->orig_node);
213 if (!router) {
214 gw_deselect(bat_priv);
215 goto out;
216 }
206 } 217 }
207 218
208unlock: 219 if ((curr_gw) && (!next_gw)) {
209 rcu_read_unlock(); 220 bat_dbg(DBG_BATMAN, bat_priv,
221 "Removing selected gateway - no gateway in range\n");
222 throw_uevent(bat_priv, UEV_GW, UEV_DEL, NULL);
223 } else if ((!curr_gw) && (next_gw)) {
224 bat_dbg(DBG_BATMAN, bat_priv,
225 "Adding route to gateway %pM (gw_flags: %i, tq: %i)\n",
226 next_gw->orig_node->orig,
227 next_gw->orig_node->gw_flags,
228 router->tq_avg);
229 throw_uevent(bat_priv, UEV_GW, UEV_ADD, gw_addr);
230 } else {
231 bat_dbg(DBG_BATMAN, bat_priv,
232 "Changing route to gateway %pM "
233 "(gw_flags: %i, tq: %i)\n",
234 next_gw->orig_node->orig,
235 next_gw->orig_node->gw_flags,
236 router->tq_avg);
237 throw_uevent(bat_priv, UEV_GW, UEV_CHANGE, gw_addr);
238 }
239
240 gw_select(bat_priv, next_gw);
241
210out: 242out:
211 if (curr_gw) 243 if (curr_gw)
212 gw_node_free_ref(curr_gw); 244 gw_node_free_ref(curr_gw);
245 if (next_gw)
246 gw_node_free_ref(next_gw);
247 if (router)
248 neigh_node_free_ref(router);
213} 249}
214 250
215void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node) 251void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
@@ -273,11 +309,10 @@ static void gw_node_add(struct bat_priv *bat_priv,
273 struct gw_node *gw_node; 309 struct gw_node *gw_node;
274 int down, up; 310 int down, up;
275 311
276 gw_node = kmalloc(sizeof(struct gw_node), GFP_ATOMIC); 312 gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC);
277 if (!gw_node) 313 if (!gw_node)
278 return; 314 return;
279 315
280 memset(gw_node, 0, sizeof(struct gw_node));
281 INIT_HLIST_NODE(&gw_node->list); 316 INIT_HLIST_NODE(&gw_node->list);
282 gw_node->orig_node = orig_node; 317 gw_node->orig_node = orig_node;
283 atomic_set(&gw_node->refcount, 1); 318 atomic_set(&gw_node->refcount, 1);
@@ -323,7 +358,7 @@ void gw_node_update(struct bat_priv *bat_priv,
323 358
324 gw_node->deleted = 0; 359 gw_node->deleted = 0;
325 360
326 if (new_gwflags == 0) { 361 if (new_gwflags == NO_FLAGS) {
327 gw_node->deleted = jiffies; 362 gw_node->deleted = jiffies;
328 bat_dbg(DBG_BATMAN, bat_priv, 363 bat_dbg(DBG_BATMAN, bat_priv,
329 "Gateway %pM removed from gateway list\n", 364 "Gateway %pM removed from gateway list\n",
@@ -336,7 +371,7 @@ void gw_node_update(struct bat_priv *bat_priv,
336 goto unlock; 371 goto unlock;
337 } 372 }
338 373
339 if (new_gwflags == 0) 374 if (new_gwflags == NO_FLAGS)
340 goto unlock; 375 goto unlock;
341 376
342 gw_node_add(bat_priv, orig_node, new_gwflags); 377 gw_node_add(bat_priv, orig_node, new_gwflags);
@@ -353,7 +388,7 @@ unlock:
353 388
354void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node) 389void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node)
355{ 390{
356 return gw_node_update(bat_priv, orig_node, 0); 391 gw_node_update(bat_priv, orig_node, 0);
357} 392}
358 393
359void gw_node_purge(struct bat_priv *bat_priv) 394void gw_node_purge(struct bat_priv *bat_priv)
@@ -361,7 +396,7 @@ void gw_node_purge(struct bat_priv *bat_priv)
361 struct gw_node *gw_node, *curr_gw; 396 struct gw_node *gw_node, *curr_gw;
362 struct hlist_node *node, *node_tmp; 397 struct hlist_node *node, *node_tmp;
363 unsigned long timeout = 2 * PURGE_TIMEOUT * HZ; 398 unsigned long timeout = 2 * PURGE_TIMEOUT * HZ;
364 char do_deselect = 0; 399 int do_deselect = 0;
365 400
366 curr_gw = gw_get_selected_gw_node(bat_priv); 401 curr_gw = gw_get_selected_gw_node(bat_priv);
367 402
@@ -394,8 +429,8 @@ void gw_node_purge(struct bat_priv *bat_priv)
394/** 429/**
395 * fails if orig_node has no router 430 * fails if orig_node has no router
396 */ 431 */
397static int _write_buffer_text(struct bat_priv *bat_priv, 432static int _write_buffer_text(struct bat_priv *bat_priv, struct seq_file *seq,
398 struct seq_file *seq, struct gw_node *gw_node) 433 const struct gw_node *gw_node)
399{ 434{
400 struct gw_node *curr_gw; 435 struct gw_node *curr_gw;
401 struct neigh_node *router; 436 struct neigh_node *router;
@@ -452,10 +487,9 @@ int gw_client_seq_print_text(struct seq_file *seq, void *offset)
452 } 487 }
453 488
454 seq_printf(seq, " %-12s (%s/%i) %17s [%10s]: gw_class ... " 489 seq_printf(seq, " %-12s (%s/%i) %17s [%10s]: gw_class ... "
455 "[B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%pM (%s)]\n", 490 "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
456 "Gateway", "#", TQ_MAX_VALUE, "Nexthop", 491 "Gateway", "#", TQ_MAX_VALUE, "Nexthop",
457 "outgoingIF", SOURCE_VERSION, REVISION_VERSION_STR, 492 "outgoingIF", SOURCE_VERSION, primary_if->net_dev->name,
458 primary_if->net_dev->name,
459 primary_if->net_dev->dev_addr, net_dev->name); 493 primary_if->net_dev->dev_addr, net_dev->name);
460 494
461 rcu_read_lock(); 495 rcu_read_lock();
@@ -480,14 +514,75 @@ out:
480 return ret; 514 return ret;
481} 515}
482 516
483int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb) 517static bool is_type_dhcprequest(struct sk_buff *skb, int header_len)
518{
519 int ret = false;
520 unsigned char *p;
521 int pkt_len;
522
523 if (skb_linearize(skb) < 0)
524 goto out;
525
526 pkt_len = skb_headlen(skb);
527
528 if (pkt_len < header_len + DHCP_OPTIONS_OFFSET + 1)
529 goto out;
530
531 p = skb->data + header_len + DHCP_OPTIONS_OFFSET;
532 pkt_len -= header_len + DHCP_OPTIONS_OFFSET + 1;
533
534 /* Access the dhcp option lists. Each entry is made up by:
535 * - octect 1: option type
536 * - octect 2: option data len (only if type != 255 and 0)
537 * - octect 3: option data */
538 while (*p != 255 && !ret) {
539 /* p now points to the first octect: option type */
540 if (*p == 53) {
541 /* type 53 is the message type option.
542 * Jump the len octect and go to the data octect */
543 if (pkt_len < 2)
544 goto out;
545 p += 2;
546
547 /* check if the message type is what we need */
548 if (*p == DHCP_REQUEST)
549 ret = true;
550 break;
551 } else if (*p == 0) {
552 /* option type 0 (padding), just go forward */
553 if (pkt_len < 1)
554 goto out;
555 pkt_len--;
556 p++;
557 } else {
558 /* This is any other option. So we get the length... */
559 if (pkt_len < 1)
560 goto out;
561 pkt_len--;
562 p++;
563
564 /* ...and then we jump over the data */
565 if (pkt_len < *p)
566 goto out;
567 pkt_len -= *p;
568 p += (*p);
569 }
570 }
571out:
572 return ret;
573}
574
575int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb,
576 struct orig_node *old_gw)
484{ 577{
485 struct ethhdr *ethhdr; 578 struct ethhdr *ethhdr;
486 struct iphdr *iphdr; 579 struct iphdr *iphdr;
487 struct ipv6hdr *ipv6hdr; 580 struct ipv6hdr *ipv6hdr;
488 struct udphdr *udphdr; 581 struct udphdr *udphdr;
489 struct gw_node *curr_gw; 582 struct gw_node *curr_gw;
583 struct neigh_node *neigh_curr = NULL, *neigh_old = NULL;
490 unsigned int header_len = 0; 584 unsigned int header_len = 0;
585 int ret = 1;
491 586
492 if (atomic_read(&bat_priv->gw_mode) == GW_MODE_OFF) 587 if (atomic_read(&bat_priv->gw_mode) == GW_MODE_OFF)
493 return 0; 588 return 0;
@@ -509,7 +604,7 @@ int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb)
509 /* check for ip header */ 604 /* check for ip header */
510 switch (ntohs(ethhdr->h_proto)) { 605 switch (ntohs(ethhdr->h_proto)) {
511 case ETH_P_IP: 606 case ETH_P_IP:
512 if (!pskb_may_pull(skb, header_len + sizeof(struct iphdr))) 607 if (!pskb_may_pull(skb, header_len + sizeof(*iphdr)))
513 return 0; 608 return 0;
514 iphdr = (struct iphdr *)(skb->data + header_len); 609 iphdr = (struct iphdr *)(skb->data + header_len);
515 header_len += iphdr->ihl * 4; 610 header_len += iphdr->ihl * 4;
@@ -520,10 +615,10 @@ int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb)
520 615
521 break; 616 break;
522 case ETH_P_IPV6: 617 case ETH_P_IPV6:
523 if (!pskb_may_pull(skb, header_len + sizeof(struct ipv6hdr))) 618 if (!pskb_may_pull(skb, header_len + sizeof(*ipv6hdr)))
524 return 0; 619 return 0;
525 ipv6hdr = (struct ipv6hdr *)(skb->data + header_len); 620 ipv6hdr = (struct ipv6hdr *)(skb->data + header_len);
526 header_len += sizeof(struct ipv6hdr); 621 header_len += sizeof(*ipv6hdr);
527 622
528 /* check for udp header */ 623 /* check for udp header */
529 if (ipv6hdr->nexthdr != IPPROTO_UDP) 624 if (ipv6hdr->nexthdr != IPPROTO_UDP)
@@ -534,10 +629,10 @@ int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb)
534 return 0; 629 return 0;
535 } 630 }
536 631
537 if (!pskb_may_pull(skb, header_len + sizeof(struct udphdr))) 632 if (!pskb_may_pull(skb, header_len + sizeof(*udphdr)))
538 return 0; 633 return 0;
539 udphdr = (struct udphdr *)(skb->data + header_len); 634 udphdr = (struct udphdr *)(skb->data + header_len);
540 header_len += sizeof(struct udphdr); 635 header_len += sizeof(*udphdr);
541 636
542 /* check for bootp port */ 637 /* check for bootp port */
543 if ((ntohs(ethhdr->h_proto) == ETH_P_IP) && 638 if ((ntohs(ethhdr->h_proto) == ETH_P_IP) &&
@@ -555,7 +650,30 @@ int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb)
555 if (!curr_gw) 650 if (!curr_gw)
556 return 0; 651 return 0;
557 652
653 /* If old_gw != NULL then this packet is unicast.
654 * So, at this point we have to check the message type: if it is a
655 * DHCPREQUEST we have to decide whether to drop it or not */
656 if (old_gw && curr_gw->orig_node != old_gw) {
657 if (is_type_dhcprequest(skb, header_len)) {
658 /* If the dhcp packet has been sent to a different gw,
659 * we have to evaluate whether the old gw is still
660 * reliable enough */
661 neigh_curr = find_router(bat_priv, curr_gw->orig_node,
662 NULL);
663 neigh_old = find_router(bat_priv, old_gw, NULL);
664 if (!neigh_curr || !neigh_old)
665 goto free_neigh;
666 if (neigh_curr->tq_avg - neigh_old->tq_avg <
667 GW_THRESHOLD)
668 ret = -1;
669 }
670 }
671free_neigh:
672 if (neigh_old)
673 neigh_node_free_ref(neigh_old);
674 if (neigh_curr)
675 neigh_node_free_ref(neigh_curr);
558 if (curr_gw) 676 if (curr_gw)
559 gw_node_free_ref(curr_gw); 677 gw_node_free_ref(curr_gw);
560 return 1; 678 return ret;
561} 679}
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index 1ce8c6066da..b9b983c07fe 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -31,6 +31,7 @@ void gw_node_update(struct bat_priv *bat_priv,
31void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node); 31void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node);
32void gw_node_purge(struct bat_priv *bat_priv); 32void gw_node_purge(struct bat_priv *bat_priv);
33int gw_client_seq_print_text(struct seq_file *seq, void *offset); 33int gw_client_seq_print_text(struct seq_file *seq, void *offset);
34int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb); 34int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb,
35 struct orig_node *old_gw);
35 36
36#endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */ 37#endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
index 50d3a59a3d7..18661af0bc3 100644
--- a/net/batman-adv/gateway_common.c
+++ b/net/batman-adv/gateway_common.c
@@ -61,9 +61,9 @@ static void kbit_to_gw_bandwidth(int down, int up, long *gw_srv_class)
61/* returns the up and downspeeds in kbit, calculated from the class */ 61/* returns the up and downspeeds in kbit, calculated from the class */
62void gw_bandwidth_to_kbit(uint8_t gw_srv_class, int *down, int *up) 62void gw_bandwidth_to_kbit(uint8_t gw_srv_class, int *down, int *up)
63{ 63{
64 char sbit = (gw_srv_class & 0x80) >> 7; 64 int sbit = (gw_srv_class & 0x80) >> 7;
65 char dpart = (gw_srv_class & 0x78) >> 3; 65 int dpart = (gw_srv_class & 0x78) >> 3;
66 char upart = (gw_srv_class & 0x07); 66 int upart = (gw_srv_class & 0x07);
67 67
68 if (!gw_srv_class) { 68 if (!gw_srv_class) {
69 *down = 0; 69 *down = 0;
@@ -76,10 +76,11 @@ void gw_bandwidth_to_kbit(uint8_t gw_srv_class, int *down, int *up)
76} 76}
77 77
78static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff, 78static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff,
79 long *up, long *down) 79 int *up, int *down)
80{ 80{
81 int ret, multi = 1; 81 int ret, multi = 1;
82 char *slash_ptr, *tmp_ptr; 82 char *slash_ptr, *tmp_ptr;
83 long ldown, lup;
83 84
84 slash_ptr = strchr(buff, '/'); 85 slash_ptr = strchr(buff, '/');
85 if (slash_ptr) 86 if (slash_ptr)
@@ -96,7 +97,7 @@ static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff,
96 *tmp_ptr = '\0'; 97 *tmp_ptr = '\0';
97 } 98 }
98 99
99 ret = strict_strtoul(buff, 10, down); 100 ret = strict_strtol(buff, 10, &ldown);
100 if (ret) { 101 if (ret) {
101 bat_err(net_dev, 102 bat_err(net_dev,
102 "Download speed of gateway mode invalid: %s\n", 103 "Download speed of gateway mode invalid: %s\n",
@@ -104,7 +105,7 @@ static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff,
104 return false; 105 return false;
105 } 106 }
106 107
107 *down *= multi; 108 *down = ldown * multi;
108 109
109 /* we also got some upload info */ 110 /* we also got some upload info */
110 if (slash_ptr) { 111 if (slash_ptr) {
@@ -121,7 +122,7 @@ static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff,
121 *tmp_ptr = '\0'; 122 *tmp_ptr = '\0';
122 } 123 }
123 124
124 ret = strict_strtoul(slash_ptr + 1, 10, up); 125 ret = strict_strtol(slash_ptr + 1, 10, &lup);
125 if (ret) { 126 if (ret) {
126 bat_err(net_dev, 127 bat_err(net_dev,
127 "Upload speed of gateway mode invalid: " 128 "Upload speed of gateway mode invalid: "
@@ -129,7 +130,7 @@ static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff,
129 return false; 130 return false;
130 } 131 }
131 132
132 *up *= multi; 133 *up = lup * multi;
133 } 134 }
134 135
135 return true; 136 return true;
@@ -138,7 +139,8 @@ static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff,
138ssize_t gw_bandwidth_set(struct net_device *net_dev, char *buff, size_t count) 139ssize_t gw_bandwidth_set(struct net_device *net_dev, char *buff, size_t count)
139{ 140{
140 struct bat_priv *bat_priv = netdev_priv(net_dev); 141 struct bat_priv *bat_priv = netdev_priv(net_dev);
141 long gw_bandwidth_tmp = 0, up = 0, down = 0; 142 long gw_bandwidth_tmp = 0;
143 int up = 0, down = 0;
142 bool ret; 144 bool ret;
143 145
144 ret = parse_gw_bandwidth(net_dev, buff, &up, &down); 146 ret = parse_gw_bandwidth(net_dev, buff, &up, &down);
@@ -158,12 +160,11 @@ ssize_t gw_bandwidth_set(struct net_device *net_dev, char *buff, size_t count)
158 * speeds, hence we need to calculate it back to show the number 160 * speeds, hence we need to calculate it back to show the number
159 * that is going to be propagated 161 * that is going to be propagated
160 **/ 162 **/
161 gw_bandwidth_to_kbit((uint8_t)gw_bandwidth_tmp, 163 gw_bandwidth_to_kbit((uint8_t)gw_bandwidth_tmp, &down, &up);
162 (int *)&down, (int *)&up);
163 164
164 gw_deselect(bat_priv); 165 gw_deselect(bat_priv);
165 bat_info(net_dev, "Changing gateway bandwidth from: '%i' to: '%ld' " 166 bat_info(net_dev, "Changing gateway bandwidth from: '%i' to: '%ld' "
166 "(propagating: %ld%s/%ld%s)\n", 167 "(propagating: %d%s/%d%s)\n",
167 atomic_read(&bat_priv->gw_bandwidth), gw_bandwidth_tmp, 168 atomic_read(&bat_priv->gw_bandwidth), gw_bandwidth_tmp,
168 (down > 2048 ? down / 1024 : down), 169 (down > 2048 ? down / 1024 : down),
169 (down > 2048 ? "MBit" : "KBit"), 170 (down > 2048 ? "MBit" : "KBit"),
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index dfbfccc9fe4..db7aacf1e09 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -46,7 +46,7 @@ void hardif_free_rcu(struct rcu_head *rcu)
46 kfree(hard_iface); 46 kfree(hard_iface);
47} 47}
48 48
49struct hard_iface *hardif_get_by_netdev(struct net_device *net_dev) 49struct hard_iface *hardif_get_by_netdev(const struct net_device *net_dev)
50{ 50{
51 struct hard_iface *hard_iface; 51 struct hard_iface *hard_iface;
52 52
@@ -64,7 +64,7 @@ out:
64 return hard_iface; 64 return hard_iface;
65} 65}
66 66
67static int is_valid_iface(struct net_device *net_dev) 67static int is_valid_iface(const struct net_device *net_dev)
68{ 68{
69 if (net_dev->flags & IFF_LOOPBACK) 69 if (net_dev->flags & IFF_LOOPBACK)
70 return 0; 70 return 0;
@@ -86,7 +86,7 @@ static int is_valid_iface(struct net_device *net_dev)
86 return 1; 86 return 1;
87} 87}
88 88
89static struct hard_iface *hardif_get_active(struct net_device *soft_iface) 89static struct hard_iface *hardif_get_active(const struct net_device *soft_iface)
90{ 90{
91 struct hard_iface *hard_iface; 91 struct hard_iface *hard_iface;
92 92
@@ -138,7 +138,7 @@ static void primary_if_select(struct bat_priv *bat_priv,
138 if (new_hard_iface && !atomic_inc_not_zero(&new_hard_iface->refcount)) 138 if (new_hard_iface && !atomic_inc_not_zero(&new_hard_iface->refcount))
139 new_hard_iface = NULL; 139 new_hard_iface = NULL;
140 140
141 curr_hard_iface = bat_priv->primary_if; 141 curr_hard_iface = rcu_dereference_protected(bat_priv->primary_if, 1);
142 rcu_assign_pointer(bat_priv->primary_if, new_hard_iface); 142 rcu_assign_pointer(bat_priv->primary_if, new_hard_iface);
143 143
144 if (curr_hard_iface) 144 if (curr_hard_iface)
@@ -152,15 +152,9 @@ static void primary_if_select(struct bat_priv *bat_priv,
152 batman_packet->ttl = TTL; 152 batman_packet->ttl = TTL;
153 153
154 primary_if_update_addr(bat_priv); 154 primary_if_update_addr(bat_priv);
155
156 /***
157 * hacky trick to make sure that we send the TT information via
158 * our new primary interface
159 */
160 atomic_set(&bat_priv->tt_local_changed, 1);
161} 155}
162 156
163static bool hardif_is_iface_up(struct hard_iface *hard_iface) 157static bool hardif_is_iface_up(const struct hard_iface *hard_iface)
164{ 158{
165 if (hard_iface->net_dev->flags & IFF_UP) 159 if (hard_iface->net_dev->flags & IFF_UP)
166 return true; 160 return true;
@@ -176,9 +170,9 @@ static void update_mac_addresses(struct hard_iface *hard_iface)
176 hard_iface->net_dev->dev_addr, ETH_ALEN); 170 hard_iface->net_dev->dev_addr, ETH_ALEN);
177} 171}
178 172
179static void check_known_mac_addr(struct net_device *net_dev) 173static void check_known_mac_addr(const struct net_device *net_dev)
180{ 174{
181 struct hard_iface *hard_iface; 175 const struct hard_iface *hard_iface;
182 176
183 rcu_read_lock(); 177 rcu_read_lock();
184 list_for_each_entry_rcu(hard_iface, &hardif_list, list) { 178 list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
@@ -204,8 +198,8 @@ static void check_known_mac_addr(struct net_device *net_dev)
204 198
205int hardif_min_mtu(struct net_device *soft_iface) 199int hardif_min_mtu(struct net_device *soft_iface)
206{ 200{
207 struct bat_priv *bat_priv = netdev_priv(soft_iface); 201 const struct bat_priv *bat_priv = netdev_priv(soft_iface);
208 struct hard_iface *hard_iface; 202 const struct hard_iface *hard_iface;
209 /* allow big frames if all devices are capable to do so 203 /* allow big frames if all devices are capable to do so
210 * (have MTU > 1500 + BAT_HEADER_LEN) */ 204 * (have MTU > 1500 + BAT_HEADER_LEN) */
211 int min_mtu = ETH_DATA_LEN; 205 int min_mtu = ETH_DATA_LEN;
@@ -285,7 +279,8 @@ static void hardif_deactivate_interface(struct hard_iface *hard_iface)
285 update_min_mtu(hard_iface->soft_iface); 279 update_min_mtu(hard_iface->soft_iface);
286} 280}
287 281
288int hardif_enable_interface(struct hard_iface *hard_iface, char *iface_name) 282int hardif_enable_interface(struct hard_iface *hard_iface,
283 const char *iface_name)
289{ 284{
290 struct bat_priv *bat_priv; 285 struct bat_priv *bat_priv;
291 struct batman_packet *batman_packet; 286 struct batman_packet *batman_packet;
@@ -336,10 +331,11 @@ int hardif_enable_interface(struct hard_iface *hard_iface, char *iface_name)
336 batman_packet = (struct batman_packet *)(hard_iface->packet_buff); 331 batman_packet = (struct batman_packet *)(hard_iface->packet_buff);
337 batman_packet->packet_type = BAT_PACKET; 332 batman_packet->packet_type = BAT_PACKET;
338 batman_packet->version = COMPAT_VERSION; 333 batman_packet->version = COMPAT_VERSION;
339 batman_packet->flags = 0; 334 batman_packet->flags = NO_FLAGS;
340 batman_packet->ttl = 2; 335 batman_packet->ttl = 2;
341 batman_packet->tq = TQ_MAX_VALUE; 336 batman_packet->tq = TQ_MAX_VALUE;
342 batman_packet->num_tt = 0; 337 batman_packet->tt_num_changes = 0;
338 batman_packet->ttvn = 0;
343 339
344 hard_iface->if_num = bat_priv->num_ifaces; 340 hard_iface->if_num = bat_priv->num_ifaces;
345 bat_priv->num_ifaces++; 341 bat_priv->num_ifaces++;
@@ -458,7 +454,7 @@ static struct hard_iface *hardif_add_interface(struct net_device *net_dev)
458 454
459 dev_hold(net_dev); 455 dev_hold(net_dev);
460 456
461 hard_iface = kmalloc(sizeof(struct hard_iface), GFP_ATOMIC); 457 hard_iface = kmalloc(sizeof(*hard_iface), GFP_ATOMIC);
462 if (!hard_iface) { 458 if (!hard_iface) {
463 pr_err("Can't add interface (%s): out of memory\n", 459 pr_err("Can't add interface (%s): out of memory\n",
464 net_dev->name); 460 net_dev->name);
@@ -522,7 +518,7 @@ void hardif_remove_interfaces(void)
522static int hard_if_event(struct notifier_block *this, 518static int hard_if_event(struct notifier_block *this,
523 unsigned long event, void *ptr) 519 unsigned long event, void *ptr)
524{ 520{
525 struct net_device *net_dev = (struct net_device *)ptr; 521 struct net_device *net_dev = ptr;
526 struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev); 522 struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
527 struct hard_iface *primary_if = NULL; 523 struct hard_iface *primary_if = NULL;
528 struct bat_priv *bat_priv; 524 struct bat_priv *bat_priv;
@@ -567,7 +563,7 @@ static int hard_if_event(struct notifier_block *this,
567 break; 563 break;
568 default: 564 default:
569 break; 565 break;
570 }; 566 }
571 567
572hardif_put: 568hardif_put:
573 hardif_free_ref(hard_iface); 569 hardif_free_ref(hard_iface);
@@ -658,6 +654,14 @@ static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
658 case BAT_VIS: 654 case BAT_VIS:
659 ret = recv_vis_packet(skb, hard_iface); 655 ret = recv_vis_packet(skb, hard_iface);
660 break; 656 break;
657 /* Translation table query (request or response) */
658 case BAT_TT_QUERY:
659 ret = recv_tt_query(skb, hard_iface);
660 break;
661 /* Roaming advertisement */
662 case BAT_ROAM_ADV:
663 ret = recv_roam_adv(skb, hard_iface);
664 break;
661 default: 665 default:
662 ret = NET_RX_DROP; 666 ret = NET_RX_DROP;
663 } 667 }
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
index 64265991460..442eacbc9e3 100644
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -22,17 +22,21 @@
22#ifndef _NET_BATMAN_ADV_HARD_INTERFACE_H_ 22#ifndef _NET_BATMAN_ADV_HARD_INTERFACE_H_
23#define _NET_BATMAN_ADV_HARD_INTERFACE_H_ 23#define _NET_BATMAN_ADV_HARD_INTERFACE_H_
24 24
25#define IF_NOT_IN_USE 0 25enum hard_if_state {
26#define IF_TO_BE_REMOVED 1 26 IF_NOT_IN_USE,
27#define IF_INACTIVE 2 27 IF_TO_BE_REMOVED,
28#define IF_ACTIVE 3 28 IF_INACTIVE,
29#define IF_TO_BE_ACTIVATED 4 29 IF_ACTIVE,
30#define IF_I_WANT_YOU 5 30 IF_TO_BE_ACTIVATED,
31 IF_I_WANT_YOU
32};
31 33
32extern struct notifier_block hard_if_notifier; 34extern struct notifier_block hard_if_notifier;
33 35
34struct hard_iface *hardif_get_by_netdev(struct net_device *net_dev); 36struct hard_iface*
35int hardif_enable_interface(struct hard_iface *hard_iface, char *iface_name); 37hardif_get_by_netdev(const struct net_device *net_dev);
38int hardif_enable_interface(struct hard_iface *hard_iface,
39 const char *iface_name);
36void hardif_disable_interface(struct hard_iface *hard_iface); 40void hardif_disable_interface(struct hard_iface *hard_iface);
37void hardif_remove_interfaces(void); 41void hardif_remove_interfaces(void);
38int hardif_min_mtu(struct net_device *soft_iface); 42int hardif_min_mtu(struct net_device *soft_iface);
diff --git a/net/batman-adv/hash.c b/net/batman-adv/hash.c
index c5213d8f2cc..2a172505f51 100644
--- a/net/batman-adv/hash.c
+++ b/net/batman-adv/hash.c
@@ -46,15 +46,16 @@ struct hashtable_t *hash_new(int size)
46{ 46{
47 struct hashtable_t *hash; 47 struct hashtable_t *hash;
48 48
49 hash = kmalloc(sizeof(struct hashtable_t), GFP_ATOMIC); 49 hash = kmalloc(sizeof(*hash), GFP_ATOMIC);
50 if (!hash) 50 if (!hash)
51 return NULL; 51 return NULL;
52 52
53 hash->table = kmalloc(sizeof(struct element_t *) * size, GFP_ATOMIC); 53 hash->table = kmalloc(sizeof(*hash->table) * size, GFP_ATOMIC);
54 if (!hash->table) 54 if (!hash->table)
55 goto free_hash; 55 goto free_hash;
56 56
57 hash->list_locks = kmalloc(sizeof(spinlock_t) * size, GFP_ATOMIC); 57 hash->list_locks = kmalloc(sizeof(*hash->list_locks) * size,
58 GFP_ATOMIC);
58 if (!hash->list_locks) 59 if (!hash->list_locks)
59 goto free_table; 60 goto free_table;
60 61
diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h
index 434822b2747..dd5c9fd7a90 100644
--- a/net/batman-adv/hash.h
+++ b/net/batman-adv/hash.h
@@ -28,12 +28,12 @@
28 * compare 2 element datas for their keys, 28 * compare 2 element datas for their keys,
29 * return 0 if same and not 0 if not 29 * return 0 if same and not 0 if not
30 * same */ 30 * same */
31typedef int (*hashdata_compare_cb)(struct hlist_node *, void *); 31typedef int (*hashdata_compare_cb)(const struct hlist_node *, const void *);
32 32
33/* the hashfunction, should return an index 33/* the hashfunction, should return an index
34 * based on the key in the data of the first 34 * based on the key in the data of the first
35 * argument and the size the second */ 35 * argument and the size the second */
36typedef int (*hashdata_choose_cb)(void *, int); 36typedef int (*hashdata_choose_cb)(const void *, int);
37typedef void (*hashdata_free_cb)(struct hlist_node *, void *); 37typedef void (*hashdata_free_cb)(struct hlist_node *, void *);
38 38
39struct hashtable_t { 39struct hashtable_t {
@@ -80,7 +80,7 @@ static inline void hash_delete(struct hashtable_t *hash,
80static inline int hash_add(struct hashtable_t *hash, 80static inline int hash_add(struct hashtable_t *hash,
81 hashdata_compare_cb compare, 81 hashdata_compare_cb compare,
82 hashdata_choose_cb choose, 82 hashdata_choose_cb choose,
83 void *data, struct hlist_node *data_node) 83 const void *data, struct hlist_node *data_node)
84{ 84{
85 int index; 85 int index;
86 struct hlist_head *head; 86 struct hlist_head *head;
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index fa22ba2bb83..ac3520e057c 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -46,7 +46,7 @@ static int bat_socket_open(struct inode *inode, struct file *file)
46 46
47 nonseekable_open(inode, file); 47 nonseekable_open(inode, file);
48 48
49 socket_client = kmalloc(sizeof(struct socket_client), GFP_KERNEL); 49 socket_client = kmalloc(sizeof(*socket_client), GFP_KERNEL);
50 50
51 if (!socket_client) 51 if (!socket_client)
52 return -ENOMEM; 52 return -ENOMEM;
@@ -310,7 +310,7 @@ static void bat_socket_add_packet(struct socket_client *socket_client,
310{ 310{
311 struct socket_packet *socket_packet; 311 struct socket_packet *socket_packet;
312 312
313 socket_packet = kmalloc(sizeof(struct socket_packet), GFP_ATOMIC); 313 socket_packet = kmalloc(sizeof(*socket_packet), GFP_ATOMIC);
314 314
315 if (!socket_packet) 315 if (!socket_packet)
316 return; 316 return;
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 0a7cee0076f..b0f9068ade5 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -58,9 +58,8 @@ static int __init batman_init(void)
58 58
59 register_netdevice_notifier(&hard_if_notifier); 59 register_netdevice_notifier(&hard_if_notifier);
60 60
61 pr_info("B.A.T.M.A.N. advanced %s%s (compatibility version %i) " 61 pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) "
62 "loaded\n", SOURCE_VERSION, REVISION_VERSION_STR, 62 "loaded\n", SOURCE_VERSION, COMPAT_VERSION);
63 COMPAT_VERSION);
64 63
65 return 0; 64 return 0;
66} 65}
@@ -84,8 +83,10 @@ int mesh_init(struct net_device *soft_iface)
84 83
85 spin_lock_init(&bat_priv->forw_bat_list_lock); 84 spin_lock_init(&bat_priv->forw_bat_list_lock);
86 spin_lock_init(&bat_priv->forw_bcast_list_lock); 85 spin_lock_init(&bat_priv->forw_bcast_list_lock);
87 spin_lock_init(&bat_priv->tt_lhash_lock); 86 spin_lock_init(&bat_priv->tt_changes_list_lock);
88 spin_lock_init(&bat_priv->tt_ghash_lock); 87 spin_lock_init(&bat_priv->tt_req_list_lock);
88 spin_lock_init(&bat_priv->tt_roam_list_lock);
89 spin_lock_init(&bat_priv->tt_buff_lock);
89 spin_lock_init(&bat_priv->gw_list_lock); 90 spin_lock_init(&bat_priv->gw_list_lock);
90 spin_lock_init(&bat_priv->vis_hash_lock); 91 spin_lock_init(&bat_priv->vis_hash_lock);
91 spin_lock_init(&bat_priv->vis_list_lock); 92 spin_lock_init(&bat_priv->vis_list_lock);
@@ -96,14 +97,14 @@ int mesh_init(struct net_device *soft_iface)
96 INIT_HLIST_HEAD(&bat_priv->forw_bcast_list); 97 INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
97 INIT_HLIST_HEAD(&bat_priv->gw_list); 98 INIT_HLIST_HEAD(&bat_priv->gw_list);
98 INIT_HLIST_HEAD(&bat_priv->softif_neigh_vids); 99 INIT_HLIST_HEAD(&bat_priv->softif_neigh_vids);
100 INIT_LIST_HEAD(&bat_priv->tt_changes_list);
101 INIT_LIST_HEAD(&bat_priv->tt_req_list);
102 INIT_LIST_HEAD(&bat_priv->tt_roam_list);
99 103
100 if (originator_init(bat_priv) < 1) 104 if (originator_init(bat_priv) < 1)
101 goto err; 105 goto err;
102 106
103 if (tt_local_init(bat_priv) < 1) 107 if (tt_init(bat_priv) < 1)
104 goto err;
105
106 if (tt_global_init(bat_priv) < 1)
107 goto err; 108 goto err;
108 109
109 tt_local_add(soft_iface, soft_iface->dev_addr); 110 tt_local_add(soft_iface, soft_iface->dev_addr);
@@ -111,6 +112,7 @@ int mesh_init(struct net_device *soft_iface)
111 if (vis_init(bat_priv) < 1) 112 if (vis_init(bat_priv) < 1)
112 goto err; 113 goto err;
113 114
115 atomic_set(&bat_priv->gw_reselect, 0);
114 atomic_set(&bat_priv->mesh_state, MESH_ACTIVE); 116 atomic_set(&bat_priv->mesh_state, MESH_ACTIVE);
115 goto end; 117 goto end;
116 118
@@ -137,8 +139,7 @@ void mesh_free(struct net_device *soft_iface)
137 gw_node_purge(bat_priv); 139 gw_node_purge(bat_priv);
138 originator_free(bat_priv); 140 originator_free(bat_priv);
139 141
140 tt_local_free(bat_priv); 142 tt_free(bat_priv);
141 tt_global_free(bat_priv);
142 143
143 softif_neigh_purge(bat_priv); 144 softif_neigh_purge(bat_priv);
144 145
@@ -155,9 +156,9 @@ void dec_module_count(void)
155 module_put(THIS_MODULE); 156 module_put(THIS_MODULE);
156} 157}
157 158
158int is_my_mac(uint8_t *addr) 159int is_my_mac(const uint8_t *addr)
159{ 160{
160 struct hard_iface *hard_iface; 161 const struct hard_iface *hard_iface;
161 162
162 rcu_read_lock(); 163 rcu_read_lock();
163 list_for_each_entry_rcu(hard_iface, &hardif_list, list) { 164 list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
@@ -182,8 +183,4 @@ MODULE_LICENSE("GPL");
182MODULE_AUTHOR(DRIVER_AUTHOR); 183MODULE_AUTHOR(DRIVER_AUTHOR);
183MODULE_DESCRIPTION(DRIVER_DESC); 184MODULE_DESCRIPTION(DRIVER_DESC);
184MODULE_SUPPORTED_DEVICE(DRIVER_DEVICE); 185MODULE_SUPPORTED_DEVICE(DRIVER_DEVICE);
185#ifdef REVISION_VERSION
186MODULE_VERSION(SOURCE_VERSION "-" REVISION_VERSION);
187#else
188MODULE_VERSION(SOURCE_VERSION); 186MODULE_VERSION(SOURCE_VERSION);
189#endif
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 148b49e0264..a6df61a6933 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -27,8 +27,9 @@
27#define DRIVER_DESC "B.A.T.M.A.N. advanced" 27#define DRIVER_DESC "B.A.T.M.A.N. advanced"
28#define DRIVER_DEVICE "batman-adv" 28#define DRIVER_DEVICE "batman-adv"
29 29
30#define SOURCE_VERSION "next" 30#ifndef SOURCE_VERSION
31 31#define SOURCE_VERSION "2011.3.0"
32#endif
32 33
33/* B.A.T.M.A.N. parameters */ 34/* B.A.T.M.A.N. parameters */
34 35
@@ -42,15 +43,25 @@
42 * -> TODO: check influence on TQ_LOCAL_WINDOW_SIZE */ 43 * -> TODO: check influence on TQ_LOCAL_WINDOW_SIZE */
43#define PURGE_TIMEOUT 200 44#define PURGE_TIMEOUT 200
44#define TT_LOCAL_TIMEOUT 3600 /* in seconds */ 45#define TT_LOCAL_TIMEOUT 3600 /* in seconds */
45 46#define TT_CLIENT_ROAM_TIMEOUT 600
46/* sliding packet range of received originator messages in squence numbers 47/* sliding packet range of received originator messages in squence numbers
47 * (should be a multiple of our word size) */ 48 * (should be a multiple of our word size) */
48#define TQ_LOCAL_WINDOW_SIZE 64 49#define TQ_LOCAL_WINDOW_SIZE 64
50#define TT_REQUEST_TIMEOUT 3 /* seconds we have to keep pending tt_req */
51
49#define TQ_GLOBAL_WINDOW_SIZE 5 52#define TQ_GLOBAL_WINDOW_SIZE 5
50#define TQ_LOCAL_BIDRECT_SEND_MINIMUM 1 53#define TQ_LOCAL_BIDRECT_SEND_MINIMUM 1
51#define TQ_LOCAL_BIDRECT_RECV_MINIMUM 1 54#define TQ_LOCAL_BIDRECT_RECV_MINIMUM 1
52#define TQ_TOTAL_BIDRECT_LIMIT 1 55#define TQ_TOTAL_BIDRECT_LIMIT 1
53 56
57#define TT_OGM_APPEND_MAX 3 /* number of OGMs sent with the last tt diff */
58
59#define ROAMING_MAX_TIME 20 /* Time in which a client can roam at most
60 * ROAMING_MAX_COUNT times */
61#define ROAMING_MAX_COUNT 5
62
63#define NO_FLAGS 0
64
54#define NUM_WORDS (TQ_LOCAL_WINDOW_SIZE / WORD_BIT_SIZE) 65#define NUM_WORDS (TQ_LOCAL_WINDOW_SIZE / WORD_BIT_SIZE)
55 66
56#define LOG_BUF_LEN 8192 /* has to be a power of 2 */ 67#define LOG_BUF_LEN 8192 /* has to be a power of 2 */
@@ -72,13 +83,27 @@
72#define RESET_PROTECTION_MS 30000 83#define RESET_PROTECTION_MS 30000
73#define EXPECTED_SEQNO_RANGE 65536 84#define EXPECTED_SEQNO_RANGE 65536
74 85
75#define MESH_INACTIVE 0 86enum mesh_state {
76#define MESH_ACTIVE 1 87 MESH_INACTIVE,
77#define MESH_DEACTIVATING 2 88 MESH_ACTIVE,
89 MESH_DEACTIVATING
90};
78 91
79#define BCAST_QUEUE_LEN 256 92#define BCAST_QUEUE_LEN 256
80#define BATMAN_QUEUE_LEN 256 93#define BATMAN_QUEUE_LEN 256
81 94
95enum uev_action {
96 UEV_ADD = 0,
97 UEV_DEL,
98 UEV_CHANGE
99};
100
101enum uev_type {
102 UEV_GW = 0
103};
104
105#define GW_THRESHOLD 50
106
82/* 107/*
83 * Debug Messages 108 * Debug Messages
84 */ 109 */
@@ -89,10 +114,12 @@
89#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 114#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
90 115
91/* all messages related to routing / flooding / broadcasting / etc */ 116/* all messages related to routing / flooding / broadcasting / etc */
92#define DBG_BATMAN 1 117enum dbg_level {
93/* route or tt entry added / changed / deleted */ 118 DBG_BATMAN = 1 << 0,
94#define DBG_ROUTES 2 119 DBG_ROUTES = 1 << 1, /* route added / changed / deleted */
95#define DBG_ALL 3 120 DBG_TT = 1 << 2, /* translation table operations */
121 DBG_ALL = 7
122};
96 123
97 124
98/* 125/*
@@ -118,12 +145,6 @@
118#include <linux/seq_file.h> 145#include <linux/seq_file.h>
119#include "types.h" 146#include "types.h"
120 147
121#ifndef REVISION_VERSION
122#define REVISION_VERSION_STR ""
123#else
124#define REVISION_VERSION_STR " "REVISION_VERSION
125#endif
126
127extern struct list_head hardif_list; 148extern struct list_head hardif_list;
128 149
129extern unsigned char broadcast_addr[]; 150extern unsigned char broadcast_addr[];
@@ -133,10 +154,10 @@ int mesh_init(struct net_device *soft_iface);
133void mesh_free(struct net_device *soft_iface); 154void mesh_free(struct net_device *soft_iface);
134void inc_module_count(void); 155void inc_module_count(void);
135void dec_module_count(void); 156void dec_module_count(void);
136int is_my_mac(uint8_t *addr); 157int is_my_mac(const uint8_t *addr);
137 158
138#ifdef CONFIG_BATMAN_ADV_DEBUG 159#ifdef CONFIG_BATMAN_ADV_DEBUG
139int debug_log(struct bat_priv *bat_priv, char *fmt, ...); 160int debug_log(struct bat_priv *bat_priv, const char *fmt, ...) __printf(2, 3);
140 161
141#define bat_dbg(type, bat_priv, fmt, arg...) \ 162#define bat_dbg(type, bat_priv, fmt, arg...) \
142 do { \ 163 do { \
@@ -145,9 +166,10 @@ int debug_log(struct bat_priv *bat_priv, char *fmt, ...);
145 } \ 166 } \
146 while (0) 167 while (0)
147#else /* !CONFIG_BATMAN_ADV_DEBUG */ 168#else /* !CONFIG_BATMAN_ADV_DEBUG */
148static inline void bat_dbg(char type __always_unused, 169__printf(3, 4)
170static inline void bat_dbg(int type __always_unused,
149 struct bat_priv *bat_priv __always_unused, 171 struct bat_priv *bat_priv __always_unused,
150 char *fmt __always_unused, ...) 172 const char *fmt __always_unused, ...)
151{ 173{
152} 174}
153#endif 175#endif
@@ -172,11 +194,32 @@ static inline void bat_dbg(char type __always_unused,
172 * 194 *
173 * note: can't use compare_ether_addr() as it requires aligned memory 195 * note: can't use compare_ether_addr() as it requires aligned memory
174 */ 196 */
175static inline int compare_eth(void *data1, void *data2) 197
198static inline int compare_eth(const void *data1, const void *data2)
176{ 199{
177 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); 200 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
178} 201}
179 202
203
180#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0) 204#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
181 205
206/* Returns the smallest signed integer in two's complement with the sizeof x */
207#define smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))
208
209/* Checks if a sequence number x is a predecessor/successor of y.
210 * they handle overflows/underflows and can correctly check for a
211 * predecessor/successor unless the variable sequence number has grown by
212 * more then 2**(bitwidth(x)-1)-1.
213 * This means that for a uint8_t with the maximum value 255, it would think:
214 * - when adding nothing - it is neither a predecessor nor a successor
215 * - before adding more than 127 to the starting value - it is a predecessor,
216 * - when adding 128 - it is neither a predecessor nor a successor,
217 * - after adding more than 127 to the starting value - it is a successor */
218#define seq_before(x, y) ({typeof(x) _d1 = (x); \
219 typeof(y) _d2 = (y); \
220 typeof(x) _dummy = (_d1 - _d2); \
221 (void) (&_d1 == &_d2); \
222 _dummy > smallest_signed_int(_dummy); })
223#define seq_after(x, y) seq_before(y, x)
224
182#endif /* _NET_BATMAN_ADV_MAIN_H_ */ 225#endif /* _NET_BATMAN_ADV_MAIN_H_ */
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 40a30bbcd14..f3c3f620d19 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -37,6 +37,14 @@ static void start_purge_timer(struct bat_priv *bat_priv)
37 queue_delayed_work(bat_event_workqueue, &bat_priv->orig_work, 1 * HZ); 37 queue_delayed_work(bat_event_workqueue, &bat_priv->orig_work, 1 * HZ);
38} 38}
39 39
40/* returns 1 if they are the same originator */
41static int compare_orig(const struct hlist_node *node, const void *data2)
42{
43 const void *data1 = container_of(node, struct orig_node, hash_entry);
44
45 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
46}
47
40int originator_init(struct bat_priv *bat_priv) 48int originator_init(struct bat_priv *bat_priv)
41{ 49{
42 if (bat_priv->orig_hash) 50 if (bat_priv->orig_hash)
@@ -77,7 +85,7 @@ struct neigh_node *orig_node_get_router(struct orig_node *orig_node)
77 85
78struct neigh_node *create_neighbor(struct orig_node *orig_node, 86struct neigh_node *create_neighbor(struct orig_node *orig_node,
79 struct orig_node *orig_neigh_node, 87 struct orig_node *orig_neigh_node,
80 uint8_t *neigh, 88 const uint8_t *neigh,
81 struct hard_iface *if_incoming) 89 struct hard_iface *if_incoming)
82{ 90{
83 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 91 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
@@ -86,7 +94,7 @@ struct neigh_node *create_neighbor(struct orig_node *orig_node,
86 bat_dbg(DBG_BATMAN, bat_priv, 94 bat_dbg(DBG_BATMAN, bat_priv,
87 "Creating new last-hop neighbor of originator\n"); 95 "Creating new last-hop neighbor of originator\n");
88 96
89 neigh_node = kzalloc(sizeof(struct neigh_node), GFP_ATOMIC); 97 neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
90 if (!neigh_node) 98 if (!neigh_node)
91 return NULL; 99 return NULL;
92 100
@@ -137,6 +145,7 @@ static void orig_node_free_rcu(struct rcu_head *rcu)
137 tt_global_del_orig(orig_node->bat_priv, orig_node, 145 tt_global_del_orig(orig_node->bat_priv, orig_node,
138 "originator timed out"); 146 "originator timed out");
139 147
148 kfree(orig_node->tt_buff);
140 kfree(orig_node->bcast_own); 149 kfree(orig_node->bcast_own);
141 kfree(orig_node->bcast_own_sum); 150 kfree(orig_node->bcast_own_sum);
142 kfree(orig_node); 151 kfree(orig_node);
@@ -183,7 +192,7 @@ void originator_free(struct bat_priv *bat_priv)
183 192
184/* this function finds or creates an originator entry for the given 193/* this function finds or creates an originator entry for the given
185 * address if it does not exits */ 194 * address if it does not exits */
186struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr) 195struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr)
187{ 196{
188 struct orig_node *orig_node; 197 struct orig_node *orig_node;
189 int size; 198 int size;
@@ -196,7 +205,7 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
196 bat_dbg(DBG_BATMAN, bat_priv, 205 bat_dbg(DBG_BATMAN, bat_priv,
197 "Creating new originator: %pM\n", addr); 206 "Creating new originator: %pM\n", addr);
198 207
199 orig_node = kzalloc(sizeof(struct orig_node), GFP_ATOMIC); 208 orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
200 if (!orig_node) 209 if (!orig_node)
201 return NULL; 210 return NULL;
202 211
@@ -205,14 +214,20 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
205 spin_lock_init(&orig_node->ogm_cnt_lock); 214 spin_lock_init(&orig_node->ogm_cnt_lock);
206 spin_lock_init(&orig_node->bcast_seqno_lock); 215 spin_lock_init(&orig_node->bcast_seqno_lock);
207 spin_lock_init(&orig_node->neigh_list_lock); 216 spin_lock_init(&orig_node->neigh_list_lock);
217 spin_lock_init(&orig_node->tt_buff_lock);
208 218
209 /* extra reference for return */ 219 /* extra reference for return */
210 atomic_set(&orig_node->refcount, 2); 220 atomic_set(&orig_node->refcount, 2);
211 221
222 orig_node->tt_poss_change = false;
212 orig_node->bat_priv = bat_priv; 223 orig_node->bat_priv = bat_priv;
213 memcpy(orig_node->orig, addr, ETH_ALEN); 224 memcpy(orig_node->orig, addr, ETH_ALEN);
214 orig_node->router = NULL; 225 orig_node->router = NULL;
226 orig_node->tt_crc = 0;
227 atomic_set(&orig_node->last_ttvn, 0);
215 orig_node->tt_buff = NULL; 228 orig_node->tt_buff = NULL;
229 orig_node->tt_buff_len = 0;
230 atomic_set(&orig_node->tt_size, 0);
216 orig_node->bcast_seqno_reset = jiffies - 1 231 orig_node->bcast_seqno_reset = jiffies - 1
217 - msecs_to_jiffies(RESET_PROTECTION_MS); 232 - msecs_to_jiffies(RESET_PROTECTION_MS);
218 orig_node->batman_seqno_reset = jiffies - 1 233 orig_node->batman_seqno_reset = jiffies - 1
@@ -322,9 +337,7 @@ static bool purge_orig_node(struct bat_priv *bat_priv,
322 if (purge_orig_neighbors(bat_priv, orig_node, 337 if (purge_orig_neighbors(bat_priv, orig_node,
323 &best_neigh_node)) { 338 &best_neigh_node)) {
324 update_routes(bat_priv, orig_node, 339 update_routes(bat_priv, orig_node,
325 best_neigh_node, 340 best_neigh_node);
326 orig_node->tt_buff,
327 orig_node->tt_buff_len);
328 } 341 }
329 } 342 }
330 343
@@ -419,9 +432,8 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
419 goto out; 432 goto out;
420 } 433 }
421 434
422 seq_printf(seq, "[B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%pM (%s)]\n", 435 seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
423 SOURCE_VERSION, REVISION_VERSION_STR, 436 SOURCE_VERSION, primary_if->net_dev->name,
424 primary_if->net_dev->name,
425 primary_if->net_dev->dev_addr, net_dev->name); 437 primary_if->net_dev->dev_addr, net_dev->name);
426 seq_printf(seq, " %-15s %s (%s/%i) %17s [%10s]: %20s ...\n", 438 seq_printf(seq, " %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
427 "Originator", "last-seen", "#", TQ_MAX_VALUE, "Nexthop", 439 "Originator", "last-seen", "#", TQ_MAX_VALUE, "Nexthop",
@@ -559,7 +571,7 @@ static int orig_node_del_if(struct orig_node *orig_node,
559 memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size); 571 memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size);
560 572
561 /* copy second part */ 573 /* copy second part */
562 memcpy(data_ptr + del_if_num * chunk_size, 574 memcpy((char *)data_ptr + del_if_num * chunk_size,
563 orig_node->bcast_own + ((del_if_num + 1) * chunk_size), 575 orig_node->bcast_own + ((del_if_num + 1) * chunk_size),
564 (max_if_num - del_if_num) * chunk_size); 576 (max_if_num - del_if_num) * chunk_size);
565 577
@@ -579,7 +591,7 @@ free_bcast_own:
579 memcpy(data_ptr, orig_node->bcast_own_sum, 591 memcpy(data_ptr, orig_node->bcast_own_sum,
580 del_if_num * sizeof(uint8_t)); 592 del_if_num * sizeof(uint8_t));
581 593
582 memcpy(data_ptr + del_if_num * sizeof(uint8_t), 594 memcpy((char *)data_ptr + del_if_num * sizeof(uint8_t),
583 orig_node->bcast_own_sum + ((del_if_num + 1) * sizeof(uint8_t)), 595 orig_node->bcast_own_sum + ((del_if_num + 1) * sizeof(uint8_t)),
584 (max_if_num - del_if_num) * sizeof(uint8_t)); 596 (max_if_num - del_if_num) * sizeof(uint8_t));
585 597
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index e1d641f27aa..cfc1f60a96a 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -28,10 +28,10 @@ int originator_init(struct bat_priv *bat_priv);
28void originator_free(struct bat_priv *bat_priv); 28void originator_free(struct bat_priv *bat_priv);
29void purge_orig_ref(struct bat_priv *bat_priv); 29void purge_orig_ref(struct bat_priv *bat_priv);
30void orig_node_free_ref(struct orig_node *orig_node); 30void orig_node_free_ref(struct orig_node *orig_node);
31struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr); 31struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr);
32struct neigh_node *create_neighbor(struct orig_node *orig_node, 32struct neigh_node *create_neighbor(struct orig_node *orig_node,
33 struct orig_node *orig_neigh_node, 33 struct orig_node *orig_neigh_node,
34 uint8_t *neigh, 34 const uint8_t *neigh,
35 struct hard_iface *if_incoming); 35 struct hard_iface *if_incoming);
36void neigh_node_free_ref(struct neigh_node *neigh_node); 36void neigh_node_free_ref(struct neigh_node *neigh_node);
37struct neigh_node *orig_node_get_router(struct orig_node *orig_node); 37struct neigh_node *orig_node_get_router(struct orig_node *orig_node);
@@ -40,19 +40,11 @@ int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num);
40int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num); 40int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num);
41 41
42 42
43/* returns 1 if they are the same originator */
44static inline int compare_orig(struct hlist_node *node, void *data2)
45{
46 void *data1 = container_of(node, struct orig_node, hash_entry);
47
48 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
49}
50
51/* hashfunction to choose an entry in a hash table of given size */ 43/* hashfunction to choose an entry in a hash table of given size */
52/* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */ 44/* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */
53static inline int choose_orig(void *data, int32_t size) 45static inline int choose_orig(const void *data, int32_t size)
54{ 46{
55 unsigned char *key = data; 47 const unsigned char *key = data;
56 uint32_t hash = 0; 48 uint32_t hash = 0;
57 size_t i; 49 size_t i;
58 50
@@ -70,7 +62,7 @@ static inline int choose_orig(void *data, int32_t size)
70} 62}
71 63
72static inline struct orig_node *orig_hash_find(struct bat_priv *bat_priv, 64static inline struct orig_node *orig_hash_find(struct bat_priv *bat_priv,
73 void *data) 65 const void *data)
74{ 66{
75 struct hashtable_t *hash = bat_priv->orig_hash; 67 struct hashtable_t *hash = bat_priv->orig_hash;
76 struct hlist_head *head; 68 struct hlist_head *head;
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index eda99650e9f..b76b4be10b9 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -24,46 +24,84 @@
24 24
25#define ETH_P_BATMAN 0x4305 /* unofficial/not registered Ethertype */ 25#define ETH_P_BATMAN 0x4305 /* unofficial/not registered Ethertype */
26 26
27#define BAT_PACKET 0x01 27enum bat_packettype {
28#define BAT_ICMP 0x02 28 BAT_PACKET = 0x01,
29#define BAT_UNICAST 0x03 29 BAT_ICMP = 0x02,
30#define BAT_BCAST 0x04 30 BAT_UNICAST = 0x03,
31#define BAT_VIS 0x05 31 BAT_BCAST = 0x04,
32#define BAT_UNICAST_FRAG 0x06 32 BAT_VIS = 0x05,
33 BAT_UNICAST_FRAG = 0x06,
34 BAT_TT_QUERY = 0x07,
35 BAT_ROAM_ADV = 0x08
36};
33 37
34/* this file is included by batctl which needs these defines */ 38/* this file is included by batctl which needs these defines */
35#define COMPAT_VERSION 12 39#define COMPAT_VERSION 14
36#define DIRECTLINK 0x40 40
37#define VIS_SERVER 0x20 41enum batman_flags {
38#define PRIMARIES_FIRST_HOP 0x10 42 PRIMARIES_FIRST_HOP = 1 << 4,
43 VIS_SERVER = 1 << 5,
44 DIRECTLINK = 1 << 6
45};
39 46
40/* ICMP message types */ 47/* ICMP message types */
41#define ECHO_REPLY 0 48enum icmp_packettype {
42#define DESTINATION_UNREACHABLE 3 49 ECHO_REPLY = 0,
43#define ECHO_REQUEST 8 50 DESTINATION_UNREACHABLE = 3,
44#define TTL_EXCEEDED 11 51 ECHO_REQUEST = 8,
45#define PARAMETER_PROBLEM 12 52 TTL_EXCEEDED = 11,
53 PARAMETER_PROBLEM = 12
54};
46 55
47/* vis defines */ 56/* vis defines */
48#define VIS_TYPE_SERVER_SYNC 0 57enum vis_packettype {
49#define VIS_TYPE_CLIENT_UPDATE 1 58 VIS_TYPE_SERVER_SYNC = 0,
59 VIS_TYPE_CLIENT_UPDATE = 1
60};
50 61
51/* fragmentation defines */ 62/* fragmentation defines */
52#define UNI_FRAG_HEAD 0x01 63enum unicast_frag_flags {
53#define UNI_FRAG_LARGETAIL 0x02 64 UNI_FRAG_HEAD = 1 << 0,
65 UNI_FRAG_LARGETAIL = 1 << 1
66};
67
68/* TT_QUERY subtypes */
69#define TT_QUERY_TYPE_MASK 0x3
70
71enum tt_query_packettype {
72 TT_REQUEST = 0,
73 TT_RESPONSE = 1
74};
75
76/* TT_QUERY flags */
77enum tt_query_flags {
78 TT_FULL_TABLE = 1 << 2
79};
80
81/* TT_CLIENT flags.
82 * Flags from 1 to 1 << 7 are sent on the wire, while flags from 1 << 8 to
83 * 1 << 15 are used for local computation only */
84enum tt_client_flags {
85 TT_CLIENT_DEL = 1 << 0,
86 TT_CLIENT_ROAM = 1 << 1,
87 TT_CLIENT_NOPURGE = 1 << 8,
88 TT_CLIENT_NEW = 1 << 9,
89 TT_CLIENT_PENDING = 1 << 10
90};
54 91
55struct batman_packet { 92struct batman_packet {
56 uint8_t packet_type; 93 uint8_t packet_type;
57 uint8_t version; /* batman version field */ 94 uint8_t version; /* batman version field */
95 uint8_t ttl;
58 uint8_t flags; /* 0x40: DIRECTLINK flag, 0x20 VIS_SERVER flag... */ 96 uint8_t flags; /* 0x40: DIRECTLINK flag, 0x20 VIS_SERVER flag... */
59 uint8_t tq;
60 uint32_t seqno; 97 uint32_t seqno;
61 uint8_t orig[6]; 98 uint8_t orig[6];
62 uint8_t prev_sender[6]; 99 uint8_t prev_sender[6];
63 uint8_t ttl;
64 uint8_t num_tt;
65 uint8_t gw_flags; /* flags related to gateway class */ 100 uint8_t gw_flags; /* flags related to gateway class */
66 uint8_t align; 101 uint8_t tq;
102 uint8_t tt_num_changes;
103 uint8_t ttvn; /* translation table version number */
104 uint16_t tt_crc;
67} __packed; 105} __packed;
68 106
69#define BAT_PACKET_LEN sizeof(struct batman_packet) 107#define BAT_PACKET_LEN sizeof(struct batman_packet)
@@ -71,12 +109,13 @@ struct batman_packet {
71struct icmp_packet { 109struct icmp_packet {
72 uint8_t packet_type; 110 uint8_t packet_type;
73 uint8_t version; /* batman version field */ 111 uint8_t version; /* batman version field */
74 uint8_t msg_type; /* see ICMP message types above */
75 uint8_t ttl; 112 uint8_t ttl;
113 uint8_t msg_type; /* see ICMP message types above */
76 uint8_t dst[6]; 114 uint8_t dst[6];
77 uint8_t orig[6]; 115 uint8_t orig[6];
78 uint16_t seqno; 116 uint16_t seqno;
79 uint8_t uid; 117 uint8_t uid;
118 uint8_t reserved;
80} __packed; 119} __packed;
81 120
82#define BAT_RR_LEN 16 121#define BAT_RR_LEN 16
@@ -86,8 +125,8 @@ struct icmp_packet {
86struct icmp_packet_rr { 125struct icmp_packet_rr {
87 uint8_t packet_type; 126 uint8_t packet_type;
88 uint8_t version; /* batman version field */ 127 uint8_t version; /* batman version field */
89 uint8_t msg_type; /* see ICMP message types above */
90 uint8_t ttl; 128 uint8_t ttl;
129 uint8_t msg_type; /* see ICMP message types above */
91 uint8_t dst[6]; 130 uint8_t dst[6];
92 uint8_t orig[6]; 131 uint8_t orig[6];
93 uint16_t seqno; 132 uint16_t seqno;
@@ -99,16 +138,19 @@ struct icmp_packet_rr {
99struct unicast_packet { 138struct unicast_packet {
100 uint8_t packet_type; 139 uint8_t packet_type;
101 uint8_t version; /* batman version field */ 140 uint8_t version; /* batman version field */
102 uint8_t dest[6];
103 uint8_t ttl; 141 uint8_t ttl;
142 uint8_t ttvn; /* destination translation table version number */
143 uint8_t dest[6];
104} __packed; 144} __packed;
105 145
106struct unicast_frag_packet { 146struct unicast_frag_packet {
107 uint8_t packet_type; 147 uint8_t packet_type;
108 uint8_t version; /* batman version field */ 148 uint8_t version; /* batman version field */
109 uint8_t dest[6];
110 uint8_t ttl; 149 uint8_t ttl;
150 uint8_t ttvn; /* destination translation table version number */
151 uint8_t dest[6];
111 uint8_t flags; 152 uint8_t flags;
153 uint8_t align;
112 uint8_t orig[6]; 154 uint8_t orig[6];
113 uint16_t seqno; 155 uint16_t seqno;
114} __packed; 156} __packed;
@@ -116,21 +158,61 @@ struct unicast_frag_packet {
116struct bcast_packet { 158struct bcast_packet {
117 uint8_t packet_type; 159 uint8_t packet_type;
118 uint8_t version; /* batman version field */ 160 uint8_t version; /* batman version field */
119 uint8_t orig[6];
120 uint8_t ttl; 161 uint8_t ttl;
162 uint8_t reserved;
121 uint32_t seqno; 163 uint32_t seqno;
164 uint8_t orig[6];
122} __packed; 165} __packed;
123 166
124struct vis_packet { 167struct vis_packet {
125 uint8_t packet_type; 168 uint8_t packet_type;
126 uint8_t version; /* batman version field */ 169 uint8_t version; /* batman version field */
170 uint8_t ttl; /* TTL */
127 uint8_t vis_type; /* which type of vis-participant sent this? */ 171 uint8_t vis_type; /* which type of vis-participant sent this? */
128 uint8_t entries; /* number of entries behind this struct */
129 uint32_t seqno; /* sequence number */ 172 uint32_t seqno; /* sequence number */
130 uint8_t ttl; /* TTL */ 173 uint8_t entries; /* number of entries behind this struct */
174 uint8_t reserved;
131 uint8_t vis_orig[6]; /* originator that announces its neighbors */ 175 uint8_t vis_orig[6]; /* originator that announces its neighbors */
132 uint8_t target_orig[6]; /* who should receive this packet */ 176 uint8_t target_orig[6]; /* who should receive this packet */
133 uint8_t sender_orig[6]; /* who sent or rebroadcasted this packet */ 177 uint8_t sender_orig[6]; /* who sent or rebroadcasted this packet */
134} __packed; 178} __packed;
135 179
180struct tt_query_packet {
181 uint8_t packet_type;
182 uint8_t version; /* batman version field */
183 uint8_t ttl;
184 /* the flag field is a combination of:
185 * - TT_REQUEST or TT_RESPONSE
186 * - TT_FULL_TABLE */
187 uint8_t flags;
188 uint8_t dst[ETH_ALEN];
189 uint8_t src[ETH_ALEN];
190 /* the ttvn field is:
191 * if TT_REQUEST: ttvn that triggered the
192 * request
193 * if TT_RESPONSE: new ttvn for the src
194 * orig_node */
195 uint8_t ttvn;
196 /* tt_data field is:
197 * if TT_REQUEST: crc associated with the
198 * ttvn
199 * if TT_RESPONSE: table_size */
200 uint16_t tt_data;
201} __packed;
202
203struct roam_adv_packet {
204 uint8_t packet_type;
205 uint8_t version;
206 uint8_t ttl;
207 uint8_t reserved;
208 uint8_t dst[ETH_ALEN];
209 uint8_t src[ETH_ALEN];
210 uint8_t client[ETH_ALEN];
211} __packed;
212
213struct tt_change {
214 uint8_t flags;
215 uint8_t addr[ETH_ALEN];
216} __packed;
217
136#endif /* _NET_BATMAN_ADV_PACKET_H_ */ 218#endif /* _NET_BATMAN_ADV_PACKET_H_ */
diff --git a/net/batman-adv/ring_buffer.c b/net/batman-adv/ring_buffer.c
index 5bb6a619afe..f1ccfa76ce8 100644
--- a/net/batman-adv/ring_buffer.c
+++ b/net/batman-adv/ring_buffer.c
@@ -28,9 +28,9 @@ void ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index, uint8_t value)
28 *lq_index = (*lq_index + 1) % TQ_GLOBAL_WINDOW_SIZE; 28 *lq_index = (*lq_index + 1) % TQ_GLOBAL_WINDOW_SIZE;
29} 29}
30 30
31uint8_t ring_buffer_avg(uint8_t lq_recv[]) 31uint8_t ring_buffer_avg(const uint8_t lq_recv[])
32{ 32{
33 uint8_t *ptr; 33 const uint8_t *ptr;
34 uint16_t count = 0, i = 0, sum = 0; 34 uint16_t count = 0, i = 0, sum = 0;
35 35
36 ptr = lq_recv; 36 ptr = lq_recv;
diff --git a/net/batman-adv/ring_buffer.h b/net/batman-adv/ring_buffer.h
index 0395b274186..7cdfe62b657 100644
--- a/net/batman-adv/ring_buffer.h
+++ b/net/batman-adv/ring_buffer.h
@@ -23,6 +23,6 @@
23#define _NET_BATMAN_ADV_RING_BUFFER_H_ 23#define _NET_BATMAN_ADV_RING_BUFFER_H_
24 24
25void ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index, uint8_t value); 25void ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index, uint8_t value);
26uint8_t ring_buffer_avg(uint8_t lq_recv[]); 26uint8_t ring_buffer_avg(const uint8_t lq_recv[]);
27 27
28#endif /* _NET_BATMAN_ADV_RING_BUFFER_H_ */ 28#endif /* _NET_BATMAN_ADV_RING_BUFFER_H_ */
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index bb1c3ec7e3f..0f32c818874 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -64,28 +64,69 @@ void slide_own_bcast_window(struct hard_iface *hard_iface)
64 } 64 }
65} 65}
66 66
67static void update_TT(struct bat_priv *bat_priv, struct orig_node *orig_node, 67static void update_transtable(struct bat_priv *bat_priv,
68 unsigned char *tt_buff, int tt_buff_len) 68 struct orig_node *orig_node,
69 const unsigned char *tt_buff,
70 uint8_t tt_num_changes, uint8_t ttvn,
71 uint16_t tt_crc)
69{ 72{
70 if ((tt_buff_len != orig_node->tt_buff_len) || 73 uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
71 ((tt_buff_len > 0) && 74 bool full_table = true;
72 (orig_node->tt_buff_len > 0) && 75
73 (memcmp(orig_node->tt_buff, tt_buff, tt_buff_len) != 0))) { 76 /* the ttvn increased by one -> we can apply the attached changes */
74 77 if (ttvn - orig_ttvn == 1) {
75 if (orig_node->tt_buff_len > 0) 78 /* the OGM could not contain the changes because they were too
76 tt_global_del_orig(bat_priv, orig_node, 79 * many to fit in one frame or because they have already been
77 "originator changed tt"); 80 * sent TT_OGM_APPEND_MAX times. In this case send a tt
78 81 * request */
79 if ((tt_buff_len > 0) && (tt_buff)) 82 if (!tt_num_changes) {
80 tt_global_add_orig(bat_priv, orig_node, 83 full_table = false;
81 tt_buff, tt_buff_len); 84 goto request_table;
85 }
86
87 tt_update_changes(bat_priv, orig_node, tt_num_changes, ttvn,
88 (struct tt_change *)tt_buff);
89
90 /* Even if we received the crc into the OGM, we prefer
91 * to recompute it to spot any possible inconsistency
92 * in the global table */
93 orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
94
95 /* The ttvn alone is not enough to guarantee consistency
96 * because a single value could repesent different states
97 * (due to the wrap around). Thus a node has to check whether
98 * the resulting table (after applying the changes) is still
99 * consistent or not. E.g. a node could disconnect while its
100 * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case
101 * checking the CRC value is mandatory to detect the
102 * inconsistency */
103 if (orig_node->tt_crc != tt_crc)
104 goto request_table;
105
106 /* Roaming phase is over: tables are in sync again. I can
107 * unset the flag */
108 orig_node->tt_poss_change = false;
109 } else {
110 /* if we missed more than one change or our tables are not
111 * in sync anymore -> request fresh tt data */
112 if (ttvn != orig_ttvn || orig_node->tt_crc != tt_crc) {
113request_table:
114 bat_dbg(DBG_TT, bat_priv, "TT inconsistency for %pM. "
115 "Need to retrieve the correct information "
116 "(ttvn: %u last_ttvn: %u crc: %u last_crc: "
117 "%u num_changes: %u)\n", orig_node->orig, ttvn,
118 orig_ttvn, tt_crc, orig_node->tt_crc,
119 tt_num_changes);
120 send_tt_request(bat_priv, orig_node, ttvn, tt_crc,
121 full_table);
122 return;
123 }
82 } 124 }
83} 125}
84 126
85static void update_route(struct bat_priv *bat_priv, 127static void update_route(struct bat_priv *bat_priv,
86 struct orig_node *orig_node, 128 struct orig_node *orig_node,
87 struct neigh_node *neigh_node, 129 struct neigh_node *neigh_node)
88 unsigned char *tt_buff, int tt_buff_len)
89{ 130{
90 struct neigh_node *curr_router; 131 struct neigh_node *curr_router;
91 132
@@ -93,11 +134,10 @@ static void update_route(struct bat_priv *bat_priv,
93 134
94 /* route deleted */ 135 /* route deleted */
95 if ((curr_router) && (!neigh_node)) { 136 if ((curr_router) && (!neigh_node)) {
96
97 bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n", 137 bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
98 orig_node->orig); 138 orig_node->orig);
99 tt_global_del_orig(bat_priv, orig_node, 139 tt_global_del_orig(bat_priv, orig_node,
100 "originator timed out"); 140 "Deleted route towards originator");
101 141
102 /* route added */ 142 /* route added */
103 } else if ((!curr_router) && (neigh_node)) { 143 } else if ((!curr_router) && (neigh_node)) {
@@ -105,11 +145,8 @@ static void update_route(struct bat_priv *bat_priv,
105 bat_dbg(DBG_ROUTES, bat_priv, 145 bat_dbg(DBG_ROUTES, bat_priv,
106 "Adding route towards: %pM (via %pM)\n", 146 "Adding route towards: %pM (via %pM)\n",
107 orig_node->orig, neigh_node->addr); 147 orig_node->orig, neigh_node->addr);
108 tt_global_add_orig(bat_priv, orig_node,
109 tt_buff, tt_buff_len);
110
111 /* route changed */ 148 /* route changed */
112 } else { 149 } else if (neigh_node && curr_router) {
113 bat_dbg(DBG_ROUTES, bat_priv, 150 bat_dbg(DBG_ROUTES, bat_priv,
114 "Changing route towards: %pM " 151 "Changing route towards: %pM "
115 "(now via %pM - was via %pM)\n", 152 "(now via %pM - was via %pM)\n",
@@ -133,10 +170,8 @@ static void update_route(struct bat_priv *bat_priv,
133 neigh_node_free_ref(curr_router); 170 neigh_node_free_ref(curr_router);
134} 171}
135 172
136
137void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node, 173void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
138 struct neigh_node *neigh_node, unsigned char *tt_buff, 174 struct neigh_node *neigh_node)
139 int tt_buff_len)
140{ 175{
141 struct neigh_node *router = NULL; 176 struct neigh_node *router = NULL;
142 177
@@ -146,11 +181,7 @@ void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
146 router = orig_node_get_router(orig_node); 181 router = orig_node_get_router(orig_node);
147 182
148 if (router != neigh_node) 183 if (router != neigh_node)
149 update_route(bat_priv, orig_node, neigh_node, 184 update_route(bat_priv, orig_node, neigh_node);
150 tt_buff, tt_buff_len);
151 /* may be just TT changed */
152 else
153 update_TT(bat_priv, orig_node, tt_buff, tt_buff_len);
154 185
155out: 186out:
156 if (router) 187 if (router)
@@ -165,7 +196,7 @@ static int is_bidirectional_neigh(struct orig_node *orig_node,
165 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 196 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
166 struct neigh_node *neigh_node = NULL, *tmp_neigh_node; 197 struct neigh_node *neigh_node = NULL, *tmp_neigh_node;
167 struct hlist_node *node; 198 struct hlist_node *node;
168 unsigned char total_count; 199 uint8_t total_count;
169 uint8_t orig_eq_count, neigh_rq_count, tq_own; 200 uint8_t orig_eq_count, neigh_rq_count, tq_own;
170 int tq_asym_penalty, ret = 0; 201 int tq_asym_penalty, ret = 0;
171 202
@@ -348,9 +379,9 @@ out:
348} 379}
349 380
350/* copy primary address for bonding */ 381/* copy primary address for bonding */
351static void bonding_save_primary(struct orig_node *orig_node, 382static void bonding_save_primary(const struct orig_node *orig_node,
352 struct orig_node *orig_neigh_node, 383 struct orig_node *orig_neigh_node,
353 struct batman_packet *batman_packet) 384 const struct batman_packet *batman_packet)
354{ 385{
355 if (!(batman_packet->flags & PRIMARIES_FIRST_HOP)) 386 if (!(batman_packet->flags & PRIMARIES_FIRST_HOP))
356 return; 387 return;
@@ -358,19 +389,16 @@ static void bonding_save_primary(struct orig_node *orig_node,
358 memcpy(orig_neigh_node->primary_addr, orig_node->orig, ETH_ALEN); 389 memcpy(orig_neigh_node->primary_addr, orig_node->orig, ETH_ALEN);
359} 390}
360 391
361static void update_orig(struct bat_priv *bat_priv, 392static void update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
362 struct orig_node *orig_node, 393 const struct ethhdr *ethhdr,
363 struct ethhdr *ethhdr, 394 const struct batman_packet *batman_packet,
364 struct batman_packet *batman_packet,
365 struct hard_iface *if_incoming, 395 struct hard_iface *if_incoming,
366 unsigned char *tt_buff, int tt_buff_len, 396 const unsigned char *tt_buff, int is_duplicate)
367 char is_duplicate)
368{ 397{
369 struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL; 398 struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
370 struct neigh_node *router = NULL; 399 struct neigh_node *router = NULL;
371 struct orig_node *orig_node_tmp; 400 struct orig_node *orig_node_tmp;
372 struct hlist_node *node; 401 struct hlist_node *node;
373 int tmp_tt_buff_len;
374 uint8_t bcast_own_sum_orig, bcast_own_sum_neigh; 402 uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;
375 403
376 bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): " 404 bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
@@ -435,9 +463,6 @@ static void update_orig(struct bat_priv *bat_priv,
435 463
436 bonding_candidate_add(orig_node, neigh_node); 464 bonding_candidate_add(orig_node, neigh_node);
437 465
438 tmp_tt_buff_len = (tt_buff_len > batman_packet->num_tt * ETH_ALEN ?
439 batman_packet->num_tt * ETH_ALEN : tt_buff_len);
440
441 /* if this neighbor already is our next hop there is nothing 466 /* if this neighbor already is our next hop there is nothing
442 * to change */ 467 * to change */
443 router = orig_node_get_router(orig_node); 468 router = orig_node_get_router(orig_node);
@@ -467,15 +492,19 @@ static void update_orig(struct bat_priv *bat_priv,
467 goto update_tt; 492 goto update_tt;
468 } 493 }
469 494
470 update_routes(bat_priv, orig_node, neigh_node, 495 update_routes(bat_priv, orig_node, neigh_node);
471 tt_buff, tmp_tt_buff_len);
472 goto update_gw;
473 496
474update_tt: 497update_tt:
475 update_routes(bat_priv, orig_node, router, 498 /* I have to check for transtable changes only if the OGM has been
476 tt_buff, tmp_tt_buff_len); 499 * sent through a primary interface */
500 if (((batman_packet->orig != ethhdr->h_source) &&
501 (batman_packet->ttl > 2)) ||
502 (batman_packet->flags & PRIMARIES_FIRST_HOP))
503 update_transtable(bat_priv, orig_node, tt_buff,
504 batman_packet->tt_num_changes,
505 batman_packet->ttvn,
506 batman_packet->tt_crc);
477 507
478update_gw:
479 if (orig_node->gw_flags != batman_packet->gw_flags) 508 if (orig_node->gw_flags != batman_packet->gw_flags)
480 gw_node_update(bat_priv, orig_node, batman_packet->gw_flags); 509 gw_node_update(bat_priv, orig_node, batman_packet->gw_flags);
481 510
@@ -531,15 +560,15 @@ static int window_protected(struct bat_priv *bat_priv,
531 * -1 the packet is old and has been received while the seqno window 560 * -1 the packet is old and has been received while the seqno window
532 * was protected. Caller should drop it. 561 * was protected. Caller should drop it.
533 */ 562 */
534static char count_real_packets(struct ethhdr *ethhdr, 563static int count_real_packets(const struct ethhdr *ethhdr,
535 struct batman_packet *batman_packet, 564 const struct batman_packet *batman_packet,
536 struct hard_iface *if_incoming) 565 const struct hard_iface *if_incoming)
537{ 566{
538 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 567 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
539 struct orig_node *orig_node; 568 struct orig_node *orig_node;
540 struct neigh_node *tmp_neigh_node; 569 struct neigh_node *tmp_neigh_node;
541 struct hlist_node *node; 570 struct hlist_node *node;
542 char is_duplicate = 0; 571 int is_duplicate = 0;
543 int32_t seq_diff; 572 int32_t seq_diff;
544 int need_update = 0; 573 int need_update = 0;
545 int set_mark, ret = -1; 574 int set_mark, ret = -1;
@@ -595,9 +624,9 @@ out:
595 return ret; 624 return ret;
596} 625}
597 626
598void receive_bat_packet(struct ethhdr *ethhdr, 627void receive_bat_packet(const struct ethhdr *ethhdr,
599 struct batman_packet *batman_packet, 628 struct batman_packet *batman_packet,
600 unsigned char *tt_buff, int tt_buff_len, 629 const unsigned char *tt_buff,
601 struct hard_iface *if_incoming) 630 struct hard_iface *if_incoming)
602{ 631{
603 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 632 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
@@ -605,10 +634,10 @@ void receive_bat_packet(struct ethhdr *ethhdr,
605 struct orig_node *orig_neigh_node, *orig_node; 634 struct orig_node *orig_neigh_node, *orig_node;
606 struct neigh_node *router = NULL, *router_router = NULL; 635 struct neigh_node *router = NULL, *router_router = NULL;
607 struct neigh_node *orig_neigh_router = NULL; 636 struct neigh_node *orig_neigh_router = NULL;
608 char has_directlink_flag; 637 int has_directlink_flag;
609 char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0; 638 int is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
610 char is_broadcast = 0, is_bidirectional, is_single_hop_neigh; 639 int is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
611 char is_duplicate; 640 int is_duplicate;
612 uint32_t if_incoming_seqno; 641 uint32_t if_incoming_seqno;
613 642
614 /* Silently drop when the batman packet is actually not a 643 /* Silently drop when the batman packet is actually not a
@@ -636,12 +665,14 @@ void receive_bat_packet(struct ethhdr *ethhdr,
636 665
637 bat_dbg(DBG_BATMAN, bat_priv, 666 bat_dbg(DBG_BATMAN, bat_priv,
638 "Received BATMAN packet via NB: %pM, IF: %s [%pM] " 667 "Received BATMAN packet via NB: %pM, IF: %s [%pM] "
639 "(from OG: %pM, via prev OG: %pM, seqno %d, tq %d, " 668 "(from OG: %pM, via prev OG: %pM, seqno %d, ttvn %u, "
640 "TTL %d, V %d, IDF %d)\n", 669 "crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n",
641 ethhdr->h_source, if_incoming->net_dev->name, 670 ethhdr->h_source, if_incoming->net_dev->name,
642 if_incoming->net_dev->dev_addr, batman_packet->orig, 671 if_incoming->net_dev->dev_addr, batman_packet->orig,
643 batman_packet->prev_sender, batman_packet->seqno, 672 batman_packet->prev_sender, batman_packet->seqno,
644 batman_packet->tq, batman_packet->ttl, batman_packet->version, 673 batman_packet->ttvn, batman_packet->tt_crc,
674 batman_packet->tt_num_changes, batman_packet->tq,
675 batman_packet->ttl, batman_packet->version,
645 has_directlink_flag); 676 has_directlink_flag);
646 677
647 rcu_read_lock(); 678 rcu_read_lock();
@@ -664,7 +695,7 @@ void receive_bat_packet(struct ethhdr *ethhdr,
664 hard_iface->net_dev->dev_addr)) 695 hard_iface->net_dev->dev_addr))
665 is_my_oldorig = 1; 696 is_my_oldorig = 1;
666 697
667 if (compare_eth(ethhdr->h_source, broadcast_addr)) 698 if (is_broadcast_ether_addr(ethhdr->h_source))
668 is_broadcast = 1; 699 is_broadcast = 1;
669 } 700 }
670 rcu_read_unlock(); 701 rcu_read_unlock();
@@ -701,17 +732,16 @@ void receive_bat_packet(struct ethhdr *ethhdr,
701 732
702 /* neighbor has to indicate direct link and it has to 733 /* neighbor has to indicate direct link and it has to
703 * come via the corresponding interface */ 734 * come via the corresponding interface */
704 /* if received seqno equals last send seqno save new 735 /* save packet seqno for bidirectional check */
705 * seqno for bidirectional check */
706 if (has_directlink_flag && 736 if (has_directlink_flag &&
707 compare_eth(if_incoming->net_dev->dev_addr, 737 compare_eth(if_incoming->net_dev->dev_addr,
708 batman_packet->orig) && 738 batman_packet->orig)) {
709 (batman_packet->seqno - if_incoming_seqno + 2 == 0)) {
710 offset = if_incoming->if_num * NUM_WORDS; 739 offset = if_incoming->if_num * NUM_WORDS;
711 740
712 spin_lock_bh(&orig_neigh_node->ogm_cnt_lock); 741 spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
713 word = &(orig_neigh_node->bcast_own[offset]); 742 word = &(orig_neigh_node->bcast_own[offset]);
714 bit_mark(word, 0); 743 bit_mark(word,
744 if_incoming_seqno - batman_packet->seqno - 2);
715 orig_neigh_node->bcast_own_sum[if_incoming->if_num] = 745 orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
716 bit_packet_count(word); 746 bit_packet_count(word);
717 spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock); 747 spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
@@ -794,14 +824,14 @@ void receive_bat_packet(struct ethhdr *ethhdr,
794 ((orig_node->last_real_seqno == batman_packet->seqno) && 824 ((orig_node->last_real_seqno == batman_packet->seqno) &&
795 (orig_node->last_ttl - 3 <= batman_packet->ttl)))) 825 (orig_node->last_ttl - 3 <= batman_packet->ttl))))
796 update_orig(bat_priv, orig_node, ethhdr, batman_packet, 826 update_orig(bat_priv, orig_node, ethhdr, batman_packet,
797 if_incoming, tt_buff, tt_buff_len, is_duplicate); 827 if_incoming, tt_buff, is_duplicate);
798 828
799 /* is single hop (direct) neighbor */ 829 /* is single hop (direct) neighbor */
800 if (is_single_hop_neigh) { 830 if (is_single_hop_neigh) {
801 831
802 /* mark direct link on incoming interface */ 832 /* mark direct link on incoming interface */
803 schedule_forward_packet(orig_node, ethhdr, batman_packet, 833 schedule_forward_packet(orig_node, ethhdr, batman_packet,
804 1, tt_buff_len, if_incoming); 834 1, if_incoming);
805 835
806 bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: " 836 bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
807 "rebroadcast neighbor packet with direct link flag\n"); 837 "rebroadcast neighbor packet with direct link flag\n");
@@ -824,7 +854,7 @@ void receive_bat_packet(struct ethhdr *ethhdr,
824 bat_dbg(DBG_BATMAN, bat_priv, 854 bat_dbg(DBG_BATMAN, bat_priv,
825 "Forwarding packet: rebroadcast originator packet\n"); 855 "Forwarding packet: rebroadcast originator packet\n");
826 schedule_forward_packet(orig_node, ethhdr, batman_packet, 856 schedule_forward_packet(orig_node, ethhdr, batman_packet,
827 0, tt_buff_len, if_incoming); 857 0, if_incoming);
828 858
829out_neigh: 859out_neigh:
830 if ((orig_neigh_node) && (!is_single_hop_neigh)) 860 if ((orig_neigh_node) && (!is_single_hop_neigh))
@@ -1077,7 +1107,7 @@ out:
1077 * This method rotates the bonding list and increases the 1107 * This method rotates the bonding list and increases the
1078 * returned router's refcount. */ 1108 * returned router's refcount. */
1079static struct neigh_node *find_bond_router(struct orig_node *primary_orig, 1109static struct neigh_node *find_bond_router(struct orig_node *primary_orig,
1080 struct hard_iface *recv_if) 1110 const struct hard_iface *recv_if)
1081{ 1111{
1082 struct neigh_node *tmp_neigh_node; 1112 struct neigh_node *tmp_neigh_node;
1083 struct neigh_node *router = NULL, *first_candidate = NULL; 1113 struct neigh_node *router = NULL, *first_candidate = NULL;
@@ -1128,7 +1158,7 @@ out:
1128 * 1158 *
1129 * Increases the returned router's refcount */ 1159 * Increases the returned router's refcount */
1130static struct neigh_node *find_ifalter_router(struct orig_node *primary_orig, 1160static struct neigh_node *find_ifalter_router(struct orig_node *primary_orig,
1131 struct hard_iface *recv_if) 1161 const struct hard_iface *recv_if)
1132{ 1162{
1133 struct neigh_node *tmp_neigh_node; 1163 struct neigh_node *tmp_neigh_node;
1134 struct neigh_node *router = NULL, *first_candidate = NULL; 1164 struct neigh_node *router = NULL, *first_candidate = NULL;
@@ -1171,12 +1201,124 @@ static struct neigh_node *find_ifalter_router(struct orig_node *primary_orig,
1171 return router; 1201 return router;
1172} 1202}
1173 1203
1204int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
1205{
1206 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
1207 struct tt_query_packet *tt_query;
1208 struct ethhdr *ethhdr;
1209
1210 /* drop packet if it has not necessary minimum size */
1211 if (unlikely(!pskb_may_pull(skb, sizeof(struct tt_query_packet))))
1212 goto out;
1213
1214 /* I could need to modify it */
1215 if (skb_cow(skb, sizeof(struct tt_query_packet)) < 0)
1216 goto out;
1217
1218 ethhdr = (struct ethhdr *)skb_mac_header(skb);
1219
1220 /* packet with unicast indication but broadcast recipient */
1221 if (is_broadcast_ether_addr(ethhdr->h_dest))
1222 goto out;
1223
1224 /* packet with broadcast sender address */
1225 if (is_broadcast_ether_addr(ethhdr->h_source))
1226 goto out;
1227
1228 tt_query = (struct tt_query_packet *)skb->data;
1229
1230 tt_query->tt_data = ntohs(tt_query->tt_data);
1231
1232 switch (tt_query->flags & TT_QUERY_TYPE_MASK) {
1233 case TT_REQUEST:
1234 /* If we cannot provide an answer the tt_request is
1235 * forwarded */
1236 if (!send_tt_response(bat_priv, tt_query)) {
1237 bat_dbg(DBG_TT, bat_priv,
1238 "Routing TT_REQUEST to %pM [%c]\n",
1239 tt_query->dst,
1240 (tt_query->flags & TT_FULL_TABLE ? 'F' : '.'));
1241 tt_query->tt_data = htons(tt_query->tt_data);
1242 return route_unicast_packet(skb, recv_if);
1243 }
1244 break;
1245 case TT_RESPONSE:
1246 /* packet needs to be linearised to access the TT changes */
1247 if (skb_linearize(skb) < 0)
1248 goto out;
1249
1250 if (is_my_mac(tt_query->dst))
1251 handle_tt_response(bat_priv, tt_query);
1252 else {
1253 bat_dbg(DBG_TT, bat_priv,
1254 "Routing TT_RESPONSE to %pM [%c]\n",
1255 tt_query->dst,
1256 (tt_query->flags & TT_FULL_TABLE ? 'F' : '.'));
1257 tt_query->tt_data = htons(tt_query->tt_data);
1258 return route_unicast_packet(skb, recv_if);
1259 }
1260 break;
1261 }
1262
1263out:
1264 /* returning NET_RX_DROP will make the caller function kfree the skb */
1265 return NET_RX_DROP;
1266}
1267
1268int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if)
1269{
1270 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
1271 struct roam_adv_packet *roam_adv_packet;
1272 struct orig_node *orig_node;
1273 struct ethhdr *ethhdr;
1274
1275 /* drop packet if it has not necessary minimum size */
1276 if (unlikely(!pskb_may_pull(skb, sizeof(struct roam_adv_packet))))
1277 goto out;
1278
1279 ethhdr = (struct ethhdr *)skb_mac_header(skb);
1280
1281 /* packet with unicast indication but broadcast recipient */
1282 if (is_broadcast_ether_addr(ethhdr->h_dest))
1283 goto out;
1284
1285 /* packet with broadcast sender address */
1286 if (is_broadcast_ether_addr(ethhdr->h_source))
1287 goto out;
1288
1289 roam_adv_packet = (struct roam_adv_packet *)skb->data;
1290
1291 if (!is_my_mac(roam_adv_packet->dst))
1292 return route_unicast_packet(skb, recv_if);
1293
1294 orig_node = orig_hash_find(bat_priv, roam_adv_packet->src);
1295 if (!orig_node)
1296 goto out;
1297
1298 bat_dbg(DBG_TT, bat_priv, "Received ROAMING_ADV from %pM "
1299 "(client %pM)\n", roam_adv_packet->src,
1300 roam_adv_packet->client);
1301
1302 tt_global_add(bat_priv, orig_node, roam_adv_packet->client,
1303 atomic_read(&orig_node->last_ttvn) + 1, true);
1304
1305 /* Roaming phase starts: I have new information but the ttvn has not
1306 * been incremented yet. This flag will make me check all the incoming
1307 * packets for the correct destination. */
1308 bat_priv->tt_poss_change = true;
1309
1310 orig_node_free_ref(orig_node);
1311out:
1312 /* returning NET_RX_DROP will make the caller function kfree the skb */
1313 return NET_RX_DROP;
1314}
1315
1174/* find a suitable router for this originator, and use 1316/* find a suitable router for this originator, and use
1175 * bonding if possible. increases the found neighbors 1317 * bonding if possible. increases the found neighbors
1176 * refcount.*/ 1318 * refcount.*/
1177struct neigh_node *find_router(struct bat_priv *bat_priv, 1319struct neigh_node *find_router(struct bat_priv *bat_priv,
1178 struct orig_node *orig_node, 1320 struct orig_node *orig_node,
1179 struct hard_iface *recv_if) 1321 const struct hard_iface *recv_if)
1180{ 1322{
1181 struct orig_node *primary_orig_node; 1323 struct orig_node *primary_orig_node;
1182 struct orig_node *router_orig; 1324 struct orig_node *router_orig;
@@ -1240,6 +1382,9 @@ struct neigh_node *find_router(struct bat_priv *bat_priv,
1240 router = find_ifalter_router(primary_orig_node, recv_if); 1382 router = find_ifalter_router(primary_orig_node, recv_if);
1241 1383
1242return_router: 1384return_router:
1385 if (router && router->if_incoming->if_status != IF_ACTIVE)
1386 goto err_unlock;
1387
1243 rcu_read_unlock(); 1388 rcu_read_unlock();
1244 return router; 1389 return router;
1245err_unlock: 1390err_unlock:
@@ -1354,14 +1499,84 @@ out:
1354 return ret; 1499 return ret;
1355} 1500}
1356 1501
1502static int check_unicast_ttvn(struct bat_priv *bat_priv,
1503 struct sk_buff *skb) {
1504 uint8_t curr_ttvn;
1505 struct orig_node *orig_node;
1506 struct ethhdr *ethhdr;
1507 struct hard_iface *primary_if;
1508 struct unicast_packet *unicast_packet;
1509 bool tt_poss_change;
1510
1511 /* I could need to modify it */
1512 if (skb_cow(skb, sizeof(struct unicast_packet)) < 0)
1513 return 0;
1514
1515 unicast_packet = (struct unicast_packet *)skb->data;
1516
1517 if (is_my_mac(unicast_packet->dest)) {
1518 tt_poss_change = bat_priv->tt_poss_change;
1519 curr_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
1520 } else {
1521 orig_node = orig_hash_find(bat_priv, unicast_packet->dest);
1522
1523 if (!orig_node)
1524 return 0;
1525
1526 curr_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
1527 tt_poss_change = orig_node->tt_poss_change;
1528 orig_node_free_ref(orig_node);
1529 }
1530
1531 /* Check whether I have to reroute the packet */
1532 if (seq_before(unicast_packet->ttvn, curr_ttvn) || tt_poss_change) {
1533 /* Linearize the skb before accessing it */
1534 if (skb_linearize(skb) < 0)
1535 return 0;
1536
1537 ethhdr = (struct ethhdr *)(skb->data +
1538 sizeof(struct unicast_packet));
1539 orig_node = transtable_search(bat_priv, ethhdr->h_dest);
1540
1541 if (!orig_node) {
1542 if (!is_my_client(bat_priv, ethhdr->h_dest))
1543 return 0;
1544 primary_if = primary_if_get_selected(bat_priv);
1545 if (!primary_if)
1546 return 0;
1547 memcpy(unicast_packet->dest,
1548 primary_if->net_dev->dev_addr, ETH_ALEN);
1549 hardif_free_ref(primary_if);
1550 } else {
1551 memcpy(unicast_packet->dest, orig_node->orig,
1552 ETH_ALEN);
1553 curr_ttvn = (uint8_t)
1554 atomic_read(&orig_node->last_ttvn);
1555 orig_node_free_ref(orig_node);
1556 }
1557
1558 bat_dbg(DBG_ROUTES, bat_priv, "TTVN mismatch (old_ttvn %u "
1559 "new_ttvn %u)! Rerouting unicast packet (for %pM) to "
1560 "%pM\n", unicast_packet->ttvn, curr_ttvn,
1561 ethhdr->h_dest, unicast_packet->dest);
1562
1563 unicast_packet->ttvn = curr_ttvn;
1564 }
1565 return 1;
1566}
1567
1357int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if) 1568int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1358{ 1569{
1570 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
1359 struct unicast_packet *unicast_packet; 1571 struct unicast_packet *unicast_packet;
1360 int hdr_size = sizeof(struct unicast_packet); 1572 int hdr_size = sizeof(*unicast_packet);
1361 1573
1362 if (check_unicast_packet(skb, hdr_size) < 0) 1574 if (check_unicast_packet(skb, hdr_size) < 0)
1363 return NET_RX_DROP; 1575 return NET_RX_DROP;
1364 1576
1577 if (!check_unicast_ttvn(bat_priv, skb))
1578 return NET_RX_DROP;
1579
1365 unicast_packet = (struct unicast_packet *)skb->data; 1580 unicast_packet = (struct unicast_packet *)skb->data;
1366 1581
1367 /* packet for me */ 1582 /* packet for me */
@@ -1377,13 +1592,16 @@ int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1377{ 1592{
1378 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); 1593 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
1379 struct unicast_frag_packet *unicast_packet; 1594 struct unicast_frag_packet *unicast_packet;
1380 int hdr_size = sizeof(struct unicast_frag_packet); 1595 int hdr_size = sizeof(*unicast_packet);
1381 struct sk_buff *new_skb = NULL; 1596 struct sk_buff *new_skb = NULL;
1382 int ret; 1597 int ret;
1383 1598
1384 if (check_unicast_packet(skb, hdr_size) < 0) 1599 if (check_unicast_packet(skb, hdr_size) < 0)
1385 return NET_RX_DROP; 1600 return NET_RX_DROP;
1386 1601
1602 if (!check_unicast_ttvn(bat_priv, skb))
1603 return NET_RX_DROP;
1604
1387 unicast_packet = (struct unicast_frag_packet *)skb->data; 1605 unicast_packet = (struct unicast_frag_packet *)skb->data;
1388 1606
1389 /* packet for me */ 1607 /* packet for me */
@@ -1413,7 +1631,7 @@ int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1413 struct orig_node *orig_node = NULL; 1631 struct orig_node *orig_node = NULL;
1414 struct bcast_packet *bcast_packet; 1632 struct bcast_packet *bcast_packet;
1415 struct ethhdr *ethhdr; 1633 struct ethhdr *ethhdr;
1416 int hdr_size = sizeof(struct bcast_packet); 1634 int hdr_size = sizeof(*bcast_packet);
1417 int ret = NET_RX_DROP; 1635 int ret = NET_RX_DROP;
1418 int32_t seq_diff; 1636 int32_t seq_diff;
1419 1637
@@ -1471,7 +1689,7 @@ int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1471 spin_unlock_bh(&orig_node->bcast_seqno_lock); 1689 spin_unlock_bh(&orig_node->bcast_seqno_lock);
1472 1690
1473 /* rebroadcast packet */ 1691 /* rebroadcast packet */
1474 add_bcast_packet_to_list(bat_priv, skb); 1692 add_bcast_packet_to_list(bat_priv, skb, 1);
1475 1693
1476 /* broadcast for me */ 1694 /* broadcast for me */
1477 interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size); 1695 interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
@@ -1491,7 +1709,7 @@ int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1491 struct vis_packet *vis_packet; 1709 struct vis_packet *vis_packet;
1492 struct ethhdr *ethhdr; 1710 struct ethhdr *ethhdr;
1493 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); 1711 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
1494 int hdr_size = sizeof(struct vis_packet); 1712 int hdr_size = sizeof(*vis_packet);
1495 1713
1496 /* keep skb linear */ 1714 /* keep skb linear */
1497 if (skb_linearize(skb) < 0) 1715 if (skb_linearize(skb) < 0)
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h
index 870f29842b2..fb14e9579b1 100644
--- a/net/batman-adv/routing.h
+++ b/net/batman-adv/routing.h
@@ -23,13 +23,12 @@
23#define _NET_BATMAN_ADV_ROUTING_H_ 23#define _NET_BATMAN_ADV_ROUTING_H_
24 24
25void slide_own_bcast_window(struct hard_iface *hard_iface); 25void slide_own_bcast_window(struct hard_iface *hard_iface);
26void receive_bat_packet(struct ethhdr *ethhdr, 26void receive_bat_packet(const struct ethhdr *ethhdr,
27 struct batman_packet *batman_packet, 27 struct batman_packet *batman_packet,
28 unsigned char *tt_buff, int tt_buff_len, 28 const unsigned char *tt_buff,
29 struct hard_iface *if_incoming); 29 struct hard_iface *if_incoming);
30void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node, 30void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
31 struct neigh_node *neigh_node, unsigned char *tt_buff, 31 struct neigh_node *neigh_node);
32 int tt_buff_len);
33int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if); 32int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
34int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if); 33int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if);
35int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if); 34int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
@@ -37,9 +36,11 @@ int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if);
37int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if); 36int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
38int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if); 37int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if);
39int recv_bat_packet(struct sk_buff *skb, struct hard_iface *recv_if); 38int recv_bat_packet(struct sk_buff *skb, struct hard_iface *recv_if);
39int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if);
40int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if);
40struct neigh_node *find_router(struct bat_priv *bat_priv, 41struct neigh_node *find_router(struct bat_priv *bat_priv,
41 struct orig_node *orig_node, 42 struct orig_node *orig_node,
42 struct hard_iface *recv_if); 43 const struct hard_iface *recv_if);
43void bonding_candidate_del(struct orig_node *orig_node, 44void bonding_candidate_del(struct orig_node *orig_node,
44 struct neigh_node *neigh_node); 45 struct neigh_node *neigh_node);
45 46
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 33779278f1b..58d14472068 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -33,14 +33,14 @@
33static void send_outstanding_bcast_packet(struct work_struct *work); 33static void send_outstanding_bcast_packet(struct work_struct *work);
34 34
35/* apply hop penalty for a normal link */ 35/* apply hop penalty for a normal link */
36static uint8_t hop_penalty(const uint8_t tq, struct bat_priv *bat_priv) 36static uint8_t hop_penalty(uint8_t tq, const struct bat_priv *bat_priv)
37{ 37{
38 int hop_penalty = atomic_read(&bat_priv->hop_penalty); 38 int hop_penalty = atomic_read(&bat_priv->hop_penalty);
39 return (tq * (TQ_MAX_VALUE - hop_penalty)) / (TQ_MAX_VALUE); 39 return (tq * (TQ_MAX_VALUE - hop_penalty)) / (TQ_MAX_VALUE);
40} 40}
41 41
42/* when do we schedule our own packet to be sent */ 42/* when do we schedule our own packet to be sent */
43static unsigned long own_send_time(struct bat_priv *bat_priv) 43static unsigned long own_send_time(const struct bat_priv *bat_priv)
44{ 44{
45 return jiffies + msecs_to_jiffies( 45 return jiffies + msecs_to_jiffies(
46 atomic_read(&bat_priv->orig_interval) - 46 atomic_read(&bat_priv->orig_interval) -
@@ -55,9 +55,8 @@ static unsigned long forward_send_time(void)
55 55
56/* send out an already prepared packet to the given address via the 56/* send out an already prepared packet to the given address via the
57 * specified batman interface */ 57 * specified batman interface */
58int send_skb_packet(struct sk_buff *skb, 58int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
59 struct hard_iface *hard_iface, 59 const uint8_t *dst_addr)
60 uint8_t *dst_addr)
61{ 60{
62 struct ethhdr *ethhdr; 61 struct ethhdr *ethhdr;
63 62
@@ -74,7 +73,7 @@ int send_skb_packet(struct sk_buff *skb,
74 } 73 }
75 74
76 /* push to the ethernet header. */ 75 /* push to the ethernet header. */
77 if (my_skb_head_push(skb, sizeof(struct ethhdr)) < 0) 76 if (my_skb_head_push(skb, sizeof(*ethhdr)) < 0)
78 goto send_skb_err; 77 goto send_skb_err;
79 78
80 skb_reset_mac_header(skb); 79 skb_reset_mac_header(skb);
@@ -121,7 +120,7 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
121 /* adjust all flags and log packets */ 120 /* adjust all flags and log packets */
122 while (aggregated_packet(buff_pos, 121 while (aggregated_packet(buff_pos,
123 forw_packet->packet_len, 122 forw_packet->packet_len,
124 batman_packet->num_tt)) { 123 batman_packet->tt_num_changes)) {
125 124
126 /* we might have aggregated direct link packets with an 125 /* we might have aggregated direct link packets with an
127 * ordinary base packet */ 126 * ordinary base packet */
@@ -136,17 +135,17 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
136 "Forwarding")); 135 "Forwarding"));
137 bat_dbg(DBG_BATMAN, bat_priv, 136 bat_dbg(DBG_BATMAN, bat_priv,
138 "%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d," 137 "%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d,"
139 " IDF %s) on interface %s [%pM]\n", 138 " IDF %s, hvn %d) on interface %s [%pM]\n",
140 fwd_str, (packet_num > 0 ? "aggregated " : ""), 139 fwd_str, (packet_num > 0 ? "aggregated " : ""),
141 batman_packet->orig, ntohl(batman_packet->seqno), 140 batman_packet->orig, ntohl(batman_packet->seqno),
142 batman_packet->tq, batman_packet->ttl, 141 batman_packet->tq, batman_packet->ttl,
143 (batman_packet->flags & DIRECTLINK ? 142 (batman_packet->flags & DIRECTLINK ?
144 "on" : "off"), 143 "on" : "off"),
145 hard_iface->net_dev->name, 144 batman_packet->ttvn, hard_iface->net_dev->name,
146 hard_iface->net_dev->dev_addr); 145 hard_iface->net_dev->dev_addr);
147 146
148 buff_pos += sizeof(struct batman_packet) + 147 buff_pos += sizeof(*batman_packet) +
149 (batman_packet->num_tt * ETH_ALEN); 148 tt_len(batman_packet->tt_num_changes);
150 packet_num++; 149 packet_num++;
151 batman_packet = (struct batman_packet *) 150 batman_packet = (struct batman_packet *)
152 (forw_packet->skb->data + buff_pos); 151 (forw_packet->skb->data + buff_pos);
@@ -164,26 +163,31 @@ static void send_packet(struct forw_packet *forw_packet)
164 struct hard_iface *hard_iface; 163 struct hard_iface *hard_iface;
165 struct net_device *soft_iface; 164 struct net_device *soft_iface;
166 struct bat_priv *bat_priv; 165 struct bat_priv *bat_priv;
166 struct hard_iface *primary_if = NULL;
167 struct batman_packet *batman_packet = 167 struct batman_packet *batman_packet =
168 (struct batman_packet *)(forw_packet->skb->data); 168 (struct batman_packet *)(forw_packet->skb->data);
169 unsigned char directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0); 169 int directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);
170 170
171 if (!forw_packet->if_incoming) { 171 if (!forw_packet->if_incoming) {
172 pr_err("Error - can't forward packet: incoming iface not " 172 pr_err("Error - can't forward packet: incoming iface not "
173 "specified\n"); 173 "specified\n");
174 return; 174 goto out;
175 } 175 }
176 176
177 soft_iface = forw_packet->if_incoming->soft_iface; 177 soft_iface = forw_packet->if_incoming->soft_iface;
178 bat_priv = netdev_priv(soft_iface); 178 bat_priv = netdev_priv(soft_iface);
179 179
180 if (forw_packet->if_incoming->if_status != IF_ACTIVE) 180 if (forw_packet->if_incoming->if_status != IF_ACTIVE)
181 return; 181 goto out;
182
183 primary_if = primary_if_get_selected(bat_priv);
184 if (!primary_if)
185 goto out;
182 186
183 /* multihomed peer assumed */ 187 /* multihomed peer assumed */
184 /* non-primary OGMs are only broadcasted on their interface */ 188 /* non-primary OGMs are only broadcasted on their interface */
185 if ((directlink && (batman_packet->ttl == 1)) || 189 if ((directlink && (batman_packet->ttl == 1)) ||
186 (forw_packet->own && (forw_packet->if_incoming->if_num > 0))) { 190 (forw_packet->own && (forw_packet->if_incoming != primary_if))) {
187 191
188 /* FIXME: what about aggregated packets ? */ 192 /* FIXME: what about aggregated packets ? */
189 bat_dbg(DBG_BATMAN, bat_priv, 193 bat_dbg(DBG_BATMAN, bat_priv,
@@ -200,7 +204,7 @@ static void send_packet(struct forw_packet *forw_packet)
200 broadcast_addr); 204 broadcast_addr);
201 forw_packet->skb = NULL; 205 forw_packet->skb = NULL;
202 206
203 return; 207 goto out;
204 } 208 }
205 209
206 /* broadcast on every interface */ 210 /* broadcast on every interface */
@@ -212,28 +216,24 @@ static void send_packet(struct forw_packet *forw_packet)
212 send_packet_to_if(forw_packet, hard_iface); 216 send_packet_to_if(forw_packet, hard_iface);
213 } 217 }
214 rcu_read_unlock(); 218 rcu_read_unlock();
219
220out:
221 if (primary_if)
222 hardif_free_ref(primary_if);
215} 223}
216 224
217static void rebuild_batman_packet(struct bat_priv *bat_priv, 225static void realloc_packet_buffer(struct hard_iface *hard_iface,
218 struct hard_iface *hard_iface) 226 int new_len)
219{ 227{
220 int new_len;
221 unsigned char *new_buff; 228 unsigned char *new_buff;
222 struct batman_packet *batman_packet; 229 struct batman_packet *batman_packet;
223 230
224 new_len = sizeof(struct batman_packet) +
225 (bat_priv->num_local_tt * ETH_ALEN);
226 new_buff = kmalloc(new_len, GFP_ATOMIC); 231 new_buff = kmalloc(new_len, GFP_ATOMIC);
227 232
228 /* keep old buffer if kmalloc should fail */ 233 /* keep old buffer if kmalloc should fail */
229 if (new_buff) { 234 if (new_buff) {
230 memcpy(new_buff, hard_iface->packet_buff, 235 memcpy(new_buff, hard_iface->packet_buff,
231 sizeof(struct batman_packet)); 236 sizeof(*batman_packet));
232 batman_packet = (struct batman_packet *)new_buff;
233
234 batman_packet->num_tt = tt_local_fill_buffer(bat_priv,
235 new_buff + sizeof(struct batman_packet),
236 new_len - sizeof(struct batman_packet));
237 237
238 kfree(hard_iface->packet_buff); 238 kfree(hard_iface->packet_buff);
239 hard_iface->packet_buff = new_buff; 239 hard_iface->packet_buff = new_buff;
@@ -241,6 +241,46 @@ static void rebuild_batman_packet(struct bat_priv *bat_priv,
241 } 241 }
242} 242}
243 243
244/* when calling this function (hard_iface == primary_if) has to be true */
245static void prepare_packet_buffer(struct bat_priv *bat_priv,
246 struct hard_iface *hard_iface)
247{
248 int new_len;
249 struct batman_packet *batman_packet;
250
251 new_len = BAT_PACKET_LEN +
252 tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes));
253
254 /* if we have too many changes for one packet don't send any
255 * and wait for the tt table request which will be fragmented */
256 if (new_len > hard_iface->soft_iface->mtu)
257 new_len = BAT_PACKET_LEN;
258
259 realloc_packet_buffer(hard_iface, new_len);
260 batman_packet = (struct batman_packet *)hard_iface->packet_buff;
261
262 atomic_set(&bat_priv->tt_crc, tt_local_crc(bat_priv));
263
264 /* reset the sending counter */
265 atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);
266
267 batman_packet->tt_num_changes = tt_changes_fill_buffer(bat_priv,
268 hard_iface->packet_buff + BAT_PACKET_LEN,
269 hard_iface->packet_len - BAT_PACKET_LEN);
270
271}
272
273static void reset_packet_buffer(struct bat_priv *bat_priv,
274 struct hard_iface *hard_iface)
275{
276 struct batman_packet *batman_packet;
277
278 realloc_packet_buffer(hard_iface, BAT_PACKET_LEN);
279
280 batman_packet = (struct batman_packet *)hard_iface->packet_buff;
281 batman_packet->tt_num_changes = 0;
282}
283
244void schedule_own_packet(struct hard_iface *hard_iface) 284void schedule_own_packet(struct hard_iface *hard_iface)
245{ 285{
246 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); 286 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
@@ -266,14 +306,21 @@ void schedule_own_packet(struct hard_iface *hard_iface)
266 if (hard_iface->if_status == IF_TO_BE_ACTIVATED) 306 if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
267 hard_iface->if_status = IF_ACTIVE; 307 hard_iface->if_status = IF_ACTIVE;
268 308
269 /* if local tt has changed and interface is a primary interface */ 309 if (hard_iface == primary_if) {
270 if ((atomic_read(&bat_priv->tt_local_changed)) && 310 /* if at least one change happened */
271 (hard_iface == primary_if)) 311 if (atomic_read(&bat_priv->tt_local_changes) > 0) {
272 rebuild_batman_packet(bat_priv, hard_iface); 312 tt_commit_changes(bat_priv);
313 prepare_packet_buffer(bat_priv, hard_iface);
314 }
315
316 /* if the changes have been sent enough times */
317 if (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))
318 reset_packet_buffer(bat_priv, hard_iface);
319 }
273 320
274 /** 321 /**
275 * NOTE: packet_buff might just have been re-allocated in 322 * NOTE: packet_buff might just have been re-allocated in
276 * rebuild_batman_packet() 323 * prepare_packet_buffer() or in reset_packet_buffer()
277 */ 324 */
278 batman_packet = (struct batman_packet *)hard_iface->packet_buff; 325 batman_packet = (struct batman_packet *)hard_iface->packet_buff;
279 326
@@ -281,6 +328,9 @@ void schedule_own_packet(struct hard_iface *hard_iface)
281 batman_packet->seqno = 328 batman_packet->seqno =
282 htonl((uint32_t)atomic_read(&hard_iface->seqno)); 329 htonl((uint32_t)atomic_read(&hard_iface->seqno));
283 330
331 batman_packet->ttvn = atomic_read(&bat_priv->ttvn);
332 batman_packet->tt_crc = htons((uint16_t)atomic_read(&bat_priv->tt_crc));
333
284 if (vis_server == VIS_TYPE_SERVER_SYNC) 334 if (vis_server == VIS_TYPE_SERVER_SYNC)
285 batman_packet->flags |= VIS_SERVER; 335 batman_packet->flags |= VIS_SERVER;
286 else 336 else
@@ -291,7 +341,7 @@ void schedule_own_packet(struct hard_iface *hard_iface)
291 batman_packet->gw_flags = 341 batman_packet->gw_flags =
292 (uint8_t)atomic_read(&bat_priv->gw_bandwidth); 342 (uint8_t)atomic_read(&bat_priv->gw_bandwidth);
293 else 343 else
294 batman_packet->gw_flags = 0; 344 batman_packet->gw_flags = NO_FLAGS;
295 345
296 atomic_inc(&hard_iface->seqno); 346 atomic_inc(&hard_iface->seqno);
297 347
@@ -307,15 +357,16 @@ void schedule_own_packet(struct hard_iface *hard_iface)
307} 357}
308 358
309void schedule_forward_packet(struct orig_node *orig_node, 359void schedule_forward_packet(struct orig_node *orig_node,
310 struct ethhdr *ethhdr, 360 const struct ethhdr *ethhdr,
311 struct batman_packet *batman_packet, 361 struct batman_packet *batman_packet,
312 uint8_t directlink, int tt_buff_len, 362 int directlink,
313 struct hard_iface *if_incoming) 363 struct hard_iface *if_incoming)
314{ 364{
315 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 365 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
316 struct neigh_node *router; 366 struct neigh_node *router;
317 unsigned char in_tq, in_ttl, tq_avg = 0; 367 uint8_t in_tq, in_ttl, tq_avg = 0;
318 unsigned long send_time; 368 unsigned long send_time;
369 uint8_t tt_num_changes;
319 370
320 if (batman_packet->ttl <= 1) { 371 if (batman_packet->ttl <= 1) {
321 bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n"); 372 bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n");
@@ -326,6 +377,7 @@ void schedule_forward_packet(struct orig_node *orig_node,
326 377
327 in_tq = batman_packet->tq; 378 in_tq = batman_packet->tq;
328 in_ttl = batman_packet->ttl; 379 in_ttl = batman_packet->ttl;
380 tt_num_changes = batman_packet->tt_num_changes;
329 381
330 batman_packet->ttl--; 382 batman_packet->ttl--;
331 memcpy(batman_packet->prev_sender, ethhdr->h_source, ETH_ALEN); 383 memcpy(batman_packet->prev_sender, ethhdr->h_source, ETH_ALEN);
@@ -358,6 +410,7 @@ void schedule_forward_packet(struct orig_node *orig_node,
358 batman_packet->ttl); 410 batman_packet->ttl);
359 411
360 batman_packet->seqno = htonl(batman_packet->seqno); 412 batman_packet->seqno = htonl(batman_packet->seqno);
413 batman_packet->tt_crc = htons(batman_packet->tt_crc);
361 414
362 /* switch of primaries first hop flag when forwarding */ 415 /* switch of primaries first hop flag when forwarding */
363 batman_packet->flags &= ~PRIMARIES_FIRST_HOP; 416 batman_packet->flags &= ~PRIMARIES_FIRST_HOP;
@@ -369,7 +422,7 @@ void schedule_forward_packet(struct orig_node *orig_node,
369 send_time = forward_send_time(); 422 send_time = forward_send_time();
370 add_bat_packet_to_list(bat_priv, 423 add_bat_packet_to_list(bat_priv,
371 (unsigned char *)batman_packet, 424 (unsigned char *)batman_packet,
372 sizeof(struct batman_packet) + tt_buff_len, 425 sizeof(*batman_packet) + tt_len(tt_num_changes),
373 if_incoming, 0, send_time); 426 if_incoming, 0, send_time);
374} 427}
375 428
@@ -408,11 +461,13 @@ static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
408 * 461 *
409 * The skb is not consumed, so the caller should make sure that the 462 * The skb is not consumed, so the caller should make sure that the
410 * skb is freed. */ 463 * skb is freed. */
411int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb) 464int add_bcast_packet_to_list(struct bat_priv *bat_priv,
465 const struct sk_buff *skb, unsigned long delay)
412{ 466{
413 struct hard_iface *primary_if = NULL; 467 struct hard_iface *primary_if = NULL;
414 struct forw_packet *forw_packet; 468 struct forw_packet *forw_packet;
415 struct bcast_packet *bcast_packet; 469 struct bcast_packet *bcast_packet;
470 struct sk_buff *newskb;
416 471
417 if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) { 472 if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
418 bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n"); 473 bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n");
@@ -423,28 +478,28 @@ int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb)
423 if (!primary_if) 478 if (!primary_if)
424 goto out_and_inc; 479 goto out_and_inc;
425 480
426 forw_packet = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC); 481 forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);
427 482
428 if (!forw_packet) 483 if (!forw_packet)
429 goto out_and_inc; 484 goto out_and_inc;
430 485
431 skb = skb_copy(skb, GFP_ATOMIC); 486 newskb = skb_copy(skb, GFP_ATOMIC);
432 if (!skb) 487 if (!newskb)
433 goto packet_free; 488 goto packet_free;
434 489
435 /* as we have a copy now, it is safe to decrease the TTL */ 490 /* as we have a copy now, it is safe to decrease the TTL */
436 bcast_packet = (struct bcast_packet *)skb->data; 491 bcast_packet = (struct bcast_packet *)newskb->data;
437 bcast_packet->ttl--; 492 bcast_packet->ttl--;
438 493
439 skb_reset_mac_header(skb); 494 skb_reset_mac_header(newskb);
440 495
441 forw_packet->skb = skb; 496 forw_packet->skb = newskb;
442 forw_packet->if_incoming = primary_if; 497 forw_packet->if_incoming = primary_if;
443 498
444 /* how often did we send the bcast packet ? */ 499 /* how often did we send the bcast packet ? */
445 forw_packet->num_packets = 0; 500 forw_packet->num_packets = 0;
446 501
447 _add_bcast_packet_to_list(bat_priv, forw_packet, 1); 502 _add_bcast_packet_to_list(bat_priv, forw_packet, delay);
448 return NETDEV_TX_OK; 503 return NETDEV_TX_OK;
449 504
450packet_free: 505packet_free:
@@ -537,7 +592,7 @@ out:
537} 592}
538 593
539void purge_outstanding_packets(struct bat_priv *bat_priv, 594void purge_outstanding_packets(struct bat_priv *bat_priv,
540 struct hard_iface *hard_iface) 595 const struct hard_iface *hard_iface)
541{ 596{
542 struct forw_packet *forw_packet; 597 struct forw_packet *forw_packet;
543 struct hlist_node *tmp_node, *safe_tmp_node; 598 struct hlist_node *tmp_node, *safe_tmp_node;
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h
index 247172d71e4..1f2d1e87766 100644
--- a/net/batman-adv/send.h
+++ b/net/batman-adv/send.h
@@ -22,18 +22,18 @@
22#ifndef _NET_BATMAN_ADV_SEND_H_ 22#ifndef _NET_BATMAN_ADV_SEND_H_
23#define _NET_BATMAN_ADV_SEND_H_ 23#define _NET_BATMAN_ADV_SEND_H_
24 24
25int send_skb_packet(struct sk_buff *skb, 25int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
26 struct hard_iface *hard_iface, 26 const uint8_t *dst_addr);
27 uint8_t *dst_addr);
28void schedule_own_packet(struct hard_iface *hard_iface); 27void schedule_own_packet(struct hard_iface *hard_iface);
29void schedule_forward_packet(struct orig_node *orig_node, 28void schedule_forward_packet(struct orig_node *orig_node,
30 struct ethhdr *ethhdr, 29 const struct ethhdr *ethhdr,
31 struct batman_packet *batman_packet, 30 struct batman_packet *batman_packet,
32 uint8_t directlink, int tt_buff_len, 31 int directlink,
33 struct hard_iface *if_outgoing); 32 struct hard_iface *if_outgoing);
34int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb); 33int add_bcast_packet_to_list(struct bat_priv *bat_priv,
34 const struct sk_buff *skb, unsigned long delay);
35void send_outstanding_bat_packet(struct work_struct *work); 35void send_outstanding_bat_packet(struct work_struct *work);
36void purge_outstanding_packets(struct bat_priv *bat_priv, 36void purge_outstanding_packets(struct bat_priv *bat_priv,
37 struct hard_iface *hard_iface); 37 const struct hard_iface *hard_iface);
38 38
39#endif /* _NET_BATMAN_ADV_SEND_H_ */ 39#endif /* _NET_BATMAN_ADV_SEND_H_ */
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index d5aa60999e8..05dd35114a2 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -30,6 +30,7 @@
30#include "gateway_common.h" 30#include "gateway_common.h"
31#include "gateway_client.h" 31#include "gateway_client.h"
32#include "bat_sysfs.h" 32#include "bat_sysfs.h"
33#include "originator.h"
33#include <linux/slab.h> 34#include <linux/slab.h>
34#include <linux/ethtool.h> 35#include <linux/ethtool.h>
35#include <linux/etherdevice.h> 36#include <linux/etherdevice.h>
@@ -123,8 +124,7 @@ static struct softif_neigh_vid *softif_neigh_vid_get(struct bat_priv *bat_priv,
123 goto out; 124 goto out;
124 } 125 }
125 126
126 softif_neigh_vid = kzalloc(sizeof(struct softif_neigh_vid), 127 softif_neigh_vid = kzalloc(sizeof(*softif_neigh_vid), GFP_ATOMIC);
127 GFP_ATOMIC);
128 if (!softif_neigh_vid) 128 if (!softif_neigh_vid)
129 goto out; 129 goto out;
130 130
@@ -146,7 +146,7 @@ out:
146} 146}
147 147
148static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv, 148static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv,
149 uint8_t *addr, short vid) 149 const uint8_t *addr, short vid)
150{ 150{
151 struct softif_neigh_vid *softif_neigh_vid; 151 struct softif_neigh_vid *softif_neigh_vid;
152 struct softif_neigh *softif_neigh = NULL; 152 struct softif_neigh *softif_neigh = NULL;
@@ -170,7 +170,7 @@ static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv,
170 goto unlock; 170 goto unlock;
171 } 171 }
172 172
173 softif_neigh = kzalloc(sizeof(struct softif_neigh), GFP_ATOMIC); 173 softif_neigh = kzalloc(sizeof(*softif_neigh), GFP_ATOMIC);
174 if (!softif_neigh) 174 if (!softif_neigh)
175 goto unlock; 175 goto unlock;
176 176
@@ -242,7 +242,8 @@ static void softif_neigh_vid_select(struct bat_priv *bat_priv,
242 if (new_neigh && !atomic_inc_not_zero(&new_neigh->refcount)) 242 if (new_neigh && !atomic_inc_not_zero(&new_neigh->refcount))
243 new_neigh = NULL; 243 new_neigh = NULL;
244 244
245 curr_neigh = softif_neigh_vid->softif_neigh; 245 curr_neigh = rcu_dereference_protected(softif_neigh_vid->softif_neigh,
246 1);
246 rcu_assign_pointer(softif_neigh_vid->softif_neigh, new_neigh); 247 rcu_assign_pointer(softif_neigh_vid->softif_neigh, new_neigh);
247 248
248 if ((curr_neigh) && (!new_neigh)) 249 if ((curr_neigh) && (!new_neigh))
@@ -380,7 +381,7 @@ void softif_neigh_purge(struct bat_priv *bat_priv)
380 struct softif_neigh *softif_neigh, *curr_softif_neigh; 381 struct softif_neigh *softif_neigh, *curr_softif_neigh;
381 struct softif_neigh_vid *softif_neigh_vid; 382 struct softif_neigh_vid *softif_neigh_vid;
382 struct hlist_node *node, *node_tmp, *node_tmp2; 383 struct hlist_node *node, *node_tmp, *node_tmp2;
383 char do_deselect; 384 int do_deselect;
384 385
385 rcu_read_lock(); 386 rcu_read_lock();
386 hlist_for_each_entry_rcu(softif_neigh_vid, node, 387 hlist_for_each_entry_rcu(softif_neigh_vid, node,
@@ -534,7 +535,7 @@ static int interface_set_mac_addr(struct net_device *dev, void *p)
534 /* only modify transtable if it has been initialised before */ 535 /* only modify transtable if it has been initialised before */
535 if (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE) { 536 if (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE) {
536 tt_local_remove(bat_priv, dev->dev_addr, 537 tt_local_remove(bat_priv, dev->dev_addr,
537 "mac address changed"); 538 "mac address changed", false);
538 tt_local_add(dev, addr->sa_data); 539 tt_local_add(dev, addr->sa_data);
539 } 540 }
540 541
@@ -553,7 +554,7 @@ static int interface_change_mtu(struct net_device *dev, int new_mtu)
553 return 0; 554 return 0;
554} 555}
555 556
556int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) 557static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
557{ 558{
558 struct ethhdr *ethhdr = (struct ethhdr *)skb->data; 559 struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
559 struct bat_priv *bat_priv = netdev_priv(soft_iface); 560 struct bat_priv *bat_priv = netdev_priv(soft_iface);
@@ -561,9 +562,10 @@ int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
561 struct bcast_packet *bcast_packet; 562 struct bcast_packet *bcast_packet;
562 struct vlan_ethhdr *vhdr; 563 struct vlan_ethhdr *vhdr;
563 struct softif_neigh *curr_softif_neigh = NULL; 564 struct softif_neigh *curr_softif_neigh = NULL;
565 struct orig_node *orig_node = NULL;
564 int data_len = skb->len, ret; 566 int data_len = skb->len, ret;
565 short vid = -1; 567 short vid = -1;
566 bool do_bcast = false; 568 bool do_bcast;
567 569
568 if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE) 570 if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
569 goto dropped; 571 goto dropped;
@@ -592,17 +594,19 @@ int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
592 if (curr_softif_neigh) 594 if (curr_softif_neigh)
593 goto dropped; 595 goto dropped;
594 596
595 /* TODO: check this for locks */ 597 /* Register the client MAC in the transtable */
596 tt_local_add(soft_iface, ethhdr->h_source); 598 tt_local_add(soft_iface, ethhdr->h_source);
597 599
598 if (is_multicast_ether_addr(ethhdr->h_dest)) { 600 orig_node = transtable_search(bat_priv, ethhdr->h_dest);
599 ret = gw_is_target(bat_priv, skb); 601 do_bcast = is_multicast_ether_addr(ethhdr->h_dest);
602 if (do_bcast || (orig_node && orig_node->gw_flags)) {
603 ret = gw_is_target(bat_priv, skb, orig_node);
600 604
601 if (ret < 0) 605 if (ret < 0)
602 goto dropped; 606 goto dropped;
603 607
604 if (ret == 0) 608 if (ret)
605 do_bcast = true; 609 do_bcast = false;
606 } 610 }
607 611
608 /* ethernet packet should be broadcasted */ 612 /* ethernet packet should be broadcasted */
@@ -611,7 +615,7 @@ int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
611 if (!primary_if) 615 if (!primary_if)
612 goto dropped; 616 goto dropped;
613 617
614 if (my_skb_head_push(skb, sizeof(struct bcast_packet)) < 0) 618 if (my_skb_head_push(skb, sizeof(*bcast_packet)) < 0)
615 goto dropped; 619 goto dropped;
616 620
617 bcast_packet = (struct bcast_packet *)skb->data; 621 bcast_packet = (struct bcast_packet *)skb->data;
@@ -630,7 +634,7 @@ int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
630 bcast_packet->seqno = 634 bcast_packet->seqno =
631 htonl(atomic_inc_return(&bat_priv->bcast_seqno)); 635 htonl(atomic_inc_return(&bat_priv->bcast_seqno));
632 636
633 add_bcast_packet_to_list(bat_priv, skb); 637 add_bcast_packet_to_list(bat_priv, skb, 1);
634 638
635 /* a copy is stored in the bcast list, therefore removing 639 /* a copy is stored in the bcast list, therefore removing
636 * the original skb. */ 640 * the original skb. */
@@ -656,6 +660,8 @@ end:
656 softif_neigh_free_ref(curr_softif_neigh); 660 softif_neigh_free_ref(curr_softif_neigh);
657 if (primary_if) 661 if (primary_if)
658 hardif_free_ref(primary_if); 662 hardif_free_ref(primary_if);
663 if (orig_node)
664 orig_node_free_ref(orig_node);
659 return NETDEV_TX_OK; 665 return NETDEV_TX_OK;
660} 666}
661 667
@@ -744,7 +750,6 @@ out:
744 return; 750 return;
745} 751}
746 752
747#ifdef HAVE_NET_DEVICE_OPS
748static const struct net_device_ops bat_netdev_ops = { 753static const struct net_device_ops bat_netdev_ops = {
749 .ndo_open = interface_open, 754 .ndo_open = interface_open,
750 .ndo_stop = interface_release, 755 .ndo_stop = interface_release,
@@ -754,7 +759,6 @@ static const struct net_device_ops bat_netdev_ops = {
754 .ndo_start_xmit = interface_tx, 759 .ndo_start_xmit = interface_tx,
755 .ndo_validate_addr = eth_validate_addr 760 .ndo_validate_addr = eth_validate_addr
756}; 761};
757#endif
758 762
759static void interface_setup(struct net_device *dev) 763static void interface_setup(struct net_device *dev)
760{ 764{
@@ -763,16 +767,7 @@ static void interface_setup(struct net_device *dev)
763 767
764 ether_setup(dev); 768 ether_setup(dev);
765 769
766#ifdef HAVE_NET_DEVICE_OPS
767 dev->netdev_ops = &bat_netdev_ops; 770 dev->netdev_ops = &bat_netdev_ops;
768#else
769 dev->open = interface_open;
770 dev->stop = interface_release;
771 dev->get_stats = interface_stats;
772 dev->set_mac_address = interface_set_mac_addr;
773 dev->change_mtu = interface_change_mtu;
774 dev->hard_start_xmit = interface_tx;
775#endif
776 dev->destructor = free_netdev; 771 dev->destructor = free_netdev;
777 dev->tx_queue_len = 0; 772 dev->tx_queue_len = 0;
778 773
@@ -790,17 +785,16 @@ static void interface_setup(struct net_device *dev)
790 785
791 SET_ETHTOOL_OPS(dev, &bat_ethtool_ops); 786 SET_ETHTOOL_OPS(dev, &bat_ethtool_ops);
792 787
793 memset(priv, 0, sizeof(struct bat_priv)); 788 memset(priv, 0, sizeof(*priv));
794} 789}
795 790
796struct net_device *softif_create(char *name) 791struct net_device *softif_create(const char *name)
797{ 792{
798 struct net_device *soft_iface; 793 struct net_device *soft_iface;
799 struct bat_priv *bat_priv; 794 struct bat_priv *bat_priv;
800 int ret; 795 int ret;
801 796
802 soft_iface = alloc_netdev(sizeof(struct bat_priv) , name, 797 soft_iface = alloc_netdev(sizeof(*bat_priv), name, interface_setup);
803 interface_setup);
804 798
805 if (!soft_iface) { 799 if (!soft_iface) {
806 pr_err("Unable to allocate the batman interface: %s\n", name); 800 pr_err("Unable to allocate the batman interface: %s\n", name);
@@ -831,7 +825,13 @@ struct net_device *softif_create(char *name)
831 825
832 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE); 826 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
833 atomic_set(&bat_priv->bcast_seqno, 1); 827 atomic_set(&bat_priv->bcast_seqno, 1);
834 atomic_set(&bat_priv->tt_local_changed, 0); 828 atomic_set(&bat_priv->ttvn, 0);
829 atomic_set(&bat_priv->tt_local_changes, 0);
830 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
831
832 bat_priv->tt_buff = NULL;
833 bat_priv->tt_buff_len = 0;
834 bat_priv->tt_poss_change = false;
835 835
836 bat_priv->primary_if = NULL; 836 bat_priv->primary_if = NULL;
837 bat_priv->num_ifaces = 0; 837 bat_priv->num_ifaces = 0;
@@ -872,15 +872,10 @@ void softif_destroy(struct net_device *soft_iface)
872 unregister_netdevice(soft_iface); 872 unregister_netdevice(soft_iface);
873} 873}
874 874
875int softif_is_valid(struct net_device *net_dev) 875int softif_is_valid(const struct net_device *net_dev)
876{ 876{
877#ifdef HAVE_NET_DEVICE_OPS
878 if (net_dev->netdev_ops->ndo_start_xmit == interface_tx) 877 if (net_dev->netdev_ops->ndo_start_xmit == interface_tx)
879 return 1; 878 return 1;
880#else
881 if (net_dev->hard_start_xmit == interface_tx)
882 return 1;
883#endif
884 879
885 return 0; 880 return 0;
886} 881}
@@ -924,4 +919,3 @@ static u32 bat_get_link(struct net_device *dev)
924{ 919{
925 return 1; 920 return 1;
926} 921}
927
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h
index 4789b6f2a0b..001546fc96f 100644
--- a/net/batman-adv/soft-interface.h
+++ b/net/batman-adv/soft-interface.h
@@ -25,12 +25,11 @@
25int my_skb_head_push(struct sk_buff *skb, unsigned int len); 25int my_skb_head_push(struct sk_buff *skb, unsigned int len);
26int softif_neigh_seq_print_text(struct seq_file *seq, void *offset); 26int softif_neigh_seq_print_text(struct seq_file *seq, void *offset);
27void softif_neigh_purge(struct bat_priv *bat_priv); 27void softif_neigh_purge(struct bat_priv *bat_priv);
28int interface_tx(struct sk_buff *skb, struct net_device *soft_iface);
29void interface_rx(struct net_device *soft_iface, 28void interface_rx(struct net_device *soft_iface,
30 struct sk_buff *skb, struct hard_iface *recv_if, 29 struct sk_buff *skb, struct hard_iface *recv_if,
31 int hdr_size); 30 int hdr_size);
32struct net_device *softif_create(char *name); 31struct net_device *softif_create(const char *name);
33void softif_destroy(struct net_device *soft_iface); 32void softif_destroy(struct net_device *soft_iface);
34int softif_is_valid(struct net_device *net_dev); 33int softif_is_valid(const struct net_device *net_dev);
35 34
36#endif /* _NET_BATMAN_ADV_SOFT_INTERFACE_H_ */ 35#endif /* _NET_BATMAN_ADV_SOFT_INTERFACE_H_ */
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 7b729660cbf..d58fd8b9c81 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -23,38 +23,45 @@
23#include "translation-table.h" 23#include "translation-table.h"
24#include "soft-interface.h" 24#include "soft-interface.h"
25#include "hard-interface.h" 25#include "hard-interface.h"
26#include "send.h"
26#include "hash.h" 27#include "hash.h"
27#include "originator.h" 28#include "originator.h"
29#include "routing.h"
28 30
29static void tt_local_purge(struct work_struct *work); 31#include <linux/crc16.h>
30static void _tt_global_del_orig(struct bat_priv *bat_priv, 32
31 struct tt_global_entry *tt_global_entry, 33static void _tt_global_del(struct bat_priv *bat_priv,
32 char *message); 34 struct tt_global_entry *tt_global_entry,
35 const char *message);
36static void tt_purge(struct work_struct *work);
33 37
34/* returns 1 if they are the same mac addr */ 38/* returns 1 if they are the same mac addr */
35static int compare_ltt(struct hlist_node *node, void *data2) 39static int compare_ltt(const struct hlist_node *node, const void *data2)
36{ 40{
37 void *data1 = container_of(node, struct tt_local_entry, hash_entry); 41 const void *data1 = container_of(node, struct tt_local_entry,
42 hash_entry);
38 43
39 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); 44 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
40} 45}
41 46
42/* returns 1 if they are the same mac addr */ 47/* returns 1 if they are the same mac addr */
43static int compare_gtt(struct hlist_node *node, void *data2) 48static int compare_gtt(const struct hlist_node *node, const void *data2)
44{ 49{
45 void *data1 = container_of(node, struct tt_global_entry, hash_entry); 50 const void *data1 = container_of(node, struct tt_global_entry,
51 hash_entry);
46 52
47 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); 53 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
48} 54}
49 55
50static void tt_local_start_timer(struct bat_priv *bat_priv) 56static void tt_start_timer(struct bat_priv *bat_priv)
51{ 57{
52 INIT_DELAYED_WORK(&bat_priv->tt_work, tt_local_purge); 58 INIT_DELAYED_WORK(&bat_priv->tt_work, tt_purge);
53 queue_delayed_work(bat_event_workqueue, &bat_priv->tt_work, 10 * HZ); 59 queue_delayed_work(bat_event_workqueue, &bat_priv->tt_work,
60 msecs_to_jiffies(5000));
54} 61}
55 62
56static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv, 63static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
57 void *data) 64 const void *data)
58{ 65{
59 struct hashtable_t *hash = bat_priv->tt_local_hash; 66 struct hashtable_t *hash = bat_priv->tt_local_hash;
60 struct hlist_head *head; 67 struct hlist_head *head;
@@ -73,6 +80,9 @@ static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
73 if (!compare_eth(tt_local_entry, data)) 80 if (!compare_eth(tt_local_entry, data))
74 continue; 81 continue;
75 82
83 if (!atomic_inc_not_zero(&tt_local_entry->refcount))
84 continue;
85
76 tt_local_entry_tmp = tt_local_entry; 86 tt_local_entry_tmp = tt_local_entry;
77 break; 87 break;
78 } 88 }
@@ -82,7 +92,7 @@ static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
82} 92}
83 93
84static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv, 94static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
85 void *data) 95 const void *data)
86{ 96{
87 struct hashtable_t *hash = bat_priv->tt_global_hash; 97 struct hashtable_t *hash = bat_priv->tt_global_hash;
88 struct hlist_head *head; 98 struct hlist_head *head;
@@ -102,6 +112,9 @@ static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
102 if (!compare_eth(tt_global_entry, data)) 112 if (!compare_eth(tt_global_entry, data))
103 continue; 113 continue;
104 114
115 if (!atomic_inc_not_zero(&tt_global_entry->refcount))
116 continue;
117
105 tt_global_entry_tmp = tt_global_entry; 118 tt_global_entry_tmp = tt_global_entry;
106 break; 119 break;
107 } 120 }
@@ -110,7 +123,66 @@ static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
110 return tt_global_entry_tmp; 123 return tt_global_entry_tmp;
111} 124}
112 125
113int tt_local_init(struct bat_priv *bat_priv) 126static bool is_out_of_time(unsigned long starting_time, unsigned long timeout)
127{
128 unsigned long deadline;
129 deadline = starting_time + msecs_to_jiffies(timeout);
130
131 return time_after(jiffies, deadline);
132}
133
134static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry)
135{
136 if (atomic_dec_and_test(&tt_local_entry->refcount))
137 kfree_rcu(tt_local_entry, rcu);
138}
139
140static void tt_global_entry_free_rcu(struct rcu_head *rcu)
141{
142 struct tt_global_entry *tt_global_entry;
143
144 tt_global_entry = container_of(rcu, struct tt_global_entry, rcu);
145
146 if (tt_global_entry->orig_node)
147 orig_node_free_ref(tt_global_entry->orig_node);
148
149 kfree(tt_global_entry);
150}
151
152static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry)
153{
154 if (atomic_dec_and_test(&tt_global_entry->refcount))
155 call_rcu(&tt_global_entry->rcu, tt_global_entry_free_rcu);
156}
157
158static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr,
159 uint8_t flags)
160{
161 struct tt_change_node *tt_change_node;
162
163 tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC);
164
165 if (!tt_change_node)
166 return;
167
168 tt_change_node->change.flags = flags;
169 memcpy(tt_change_node->change.addr, addr, ETH_ALEN);
170
171 spin_lock_bh(&bat_priv->tt_changes_list_lock);
172 /* track the change in the OGMinterval list */
173 list_add_tail(&tt_change_node->list, &bat_priv->tt_changes_list);
174 atomic_inc(&bat_priv->tt_local_changes);
175 spin_unlock_bh(&bat_priv->tt_changes_list_lock);
176
177 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
178}
179
180int tt_len(int changes_num)
181{
182 return changes_num * sizeof(struct tt_change);
183}
184
185static int tt_local_init(struct bat_priv *bat_priv)
114{ 186{
115 if (bat_priv->tt_local_hash) 187 if (bat_priv->tt_local_hash)
116 return 1; 188 return 1;
@@ -120,116 +192,114 @@ int tt_local_init(struct bat_priv *bat_priv)
120 if (!bat_priv->tt_local_hash) 192 if (!bat_priv->tt_local_hash)
121 return 0; 193 return 0;
122 194
123 atomic_set(&bat_priv->tt_local_changed, 0);
124 tt_local_start_timer(bat_priv);
125
126 return 1; 195 return 1;
127} 196}
128 197
129void tt_local_add(struct net_device *soft_iface, uint8_t *addr) 198void tt_local_add(struct net_device *soft_iface, const uint8_t *addr)
130{ 199{
131 struct bat_priv *bat_priv = netdev_priv(soft_iface); 200 struct bat_priv *bat_priv = netdev_priv(soft_iface);
132 struct tt_local_entry *tt_local_entry; 201 struct tt_local_entry *tt_local_entry = NULL;
133 struct tt_global_entry *tt_global_entry; 202 struct tt_global_entry *tt_global_entry = NULL;
134 int required_bytes;
135 203
136 spin_lock_bh(&bat_priv->tt_lhash_lock);
137 tt_local_entry = tt_local_hash_find(bat_priv, addr); 204 tt_local_entry = tt_local_hash_find(bat_priv, addr);
138 spin_unlock_bh(&bat_priv->tt_lhash_lock);
139 205
140 if (tt_local_entry) { 206 if (tt_local_entry) {
141 tt_local_entry->last_seen = jiffies; 207 tt_local_entry->last_seen = jiffies;
142 return; 208 goto out;
143 }
144
145 /* only announce as many hosts as possible in the batman-packet and
146 space in batman_packet->num_tt That also should give a limit to
147 MAC-flooding. */
148 required_bytes = (bat_priv->num_local_tt + 1) * ETH_ALEN;
149 required_bytes += BAT_PACKET_LEN;
150
151 if ((required_bytes > ETH_DATA_LEN) ||
152 (atomic_read(&bat_priv->aggregated_ogms) &&
153 required_bytes > MAX_AGGREGATION_BYTES) ||
154 (bat_priv->num_local_tt + 1 > 255)) {
155 bat_dbg(DBG_ROUTES, bat_priv,
156 "Can't add new local tt entry (%pM): "
157 "number of local tt entries exceeds packet size\n",
158 addr);
159 return;
160 } 209 }
161 210
162 bat_dbg(DBG_ROUTES, bat_priv, 211 tt_local_entry = kmalloc(sizeof(*tt_local_entry), GFP_ATOMIC);
163 "Creating new local tt entry: %pM\n", addr);
164
165 tt_local_entry = kmalloc(sizeof(struct tt_local_entry), GFP_ATOMIC);
166 if (!tt_local_entry) 212 if (!tt_local_entry)
167 return; 213 goto out;
214
215 bat_dbg(DBG_TT, bat_priv,
216 "Creating new local tt entry: %pM (ttvn: %d)\n", addr,
217 (uint8_t)atomic_read(&bat_priv->ttvn));
168 218
169 memcpy(tt_local_entry->addr, addr, ETH_ALEN); 219 memcpy(tt_local_entry->addr, addr, ETH_ALEN);
170 tt_local_entry->last_seen = jiffies; 220 tt_local_entry->last_seen = jiffies;
221 tt_local_entry->flags = NO_FLAGS;
222 atomic_set(&tt_local_entry->refcount, 2);
171 223
172 /* the batman interface mac address should never be purged */ 224 /* the batman interface mac address should never be purged */
173 if (compare_eth(addr, soft_iface->dev_addr)) 225 if (compare_eth(addr, soft_iface->dev_addr))
174 tt_local_entry->never_purge = 1; 226 tt_local_entry->flags |= TT_CLIENT_NOPURGE;
175 else
176 tt_local_entry->never_purge = 0;
177 227
178 spin_lock_bh(&bat_priv->tt_lhash_lock); 228 tt_local_event(bat_priv, addr, tt_local_entry->flags);
229
230 /* The local entry has to be marked as NEW to avoid to send it in
231 * a full table response going out before the next ttvn increment
232 * (consistency check) */
233 tt_local_entry->flags |= TT_CLIENT_NEW;
179 234
180 hash_add(bat_priv->tt_local_hash, compare_ltt, choose_orig, 235 hash_add(bat_priv->tt_local_hash, compare_ltt, choose_orig,
181 tt_local_entry, &tt_local_entry->hash_entry); 236 tt_local_entry, &tt_local_entry->hash_entry);
182 bat_priv->num_local_tt++;
183 atomic_set(&bat_priv->tt_local_changed, 1);
184
185 spin_unlock_bh(&bat_priv->tt_lhash_lock);
186 237
187 /* remove address from global hash if present */ 238 /* remove address from global hash if present */
188 spin_lock_bh(&bat_priv->tt_ghash_lock);
189
190 tt_global_entry = tt_global_hash_find(bat_priv, addr); 239 tt_global_entry = tt_global_hash_find(bat_priv, addr);
191 240
241 /* Check whether it is a roaming! */
242 if (tt_global_entry) {
243 /* This node is probably going to update its tt table */
244 tt_global_entry->orig_node->tt_poss_change = true;
245 /* The global entry has to be marked as PENDING and has to be
246 * kept for consistency purpose */
247 tt_global_entry->flags |= TT_CLIENT_PENDING;
248 send_roam_adv(bat_priv, tt_global_entry->addr,
249 tt_global_entry->orig_node);
250 }
251out:
252 if (tt_local_entry)
253 tt_local_entry_free_ref(tt_local_entry);
192 if (tt_global_entry) 254 if (tt_global_entry)
193 _tt_global_del_orig(bat_priv, tt_global_entry, 255 tt_global_entry_free_ref(tt_global_entry);
194 "local tt received");
195
196 spin_unlock_bh(&bat_priv->tt_ghash_lock);
197} 256}
198 257
199int tt_local_fill_buffer(struct bat_priv *bat_priv, 258int tt_changes_fill_buffer(struct bat_priv *bat_priv,
200 unsigned char *buff, int buff_len) 259 unsigned char *buff, int buff_len)
201{ 260{
202 struct hashtable_t *hash = bat_priv->tt_local_hash; 261 int count = 0, tot_changes = 0;
203 struct tt_local_entry *tt_local_entry; 262 struct tt_change_node *entry, *safe;
204 struct hlist_node *node;
205 struct hlist_head *head;
206 int i, count = 0;
207 263
208 spin_lock_bh(&bat_priv->tt_lhash_lock); 264 if (buff_len > 0)
265 tot_changes = buff_len / tt_len(1);
209 266
210 for (i = 0; i < hash->size; i++) { 267 spin_lock_bh(&bat_priv->tt_changes_list_lock);
211 head = &hash->table[i]; 268 atomic_set(&bat_priv->tt_local_changes, 0);
212
213 rcu_read_lock();
214 hlist_for_each_entry_rcu(tt_local_entry, node,
215 head, hash_entry) {
216 if (buff_len < (count + 1) * ETH_ALEN)
217 break;
218
219 memcpy(buff + (count * ETH_ALEN), tt_local_entry->addr,
220 ETH_ALEN);
221 269
270 list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
271 list) {
272 if (count < tot_changes) {
273 memcpy(buff + tt_len(count),
274 &entry->change, sizeof(struct tt_change));
222 count++; 275 count++;
223 } 276 }
224 rcu_read_unlock(); 277 list_del(&entry->list);
278 kfree(entry);
225 } 279 }
280 spin_unlock_bh(&bat_priv->tt_changes_list_lock);
281
282 /* Keep the buffer for possible tt_request */
283 spin_lock_bh(&bat_priv->tt_buff_lock);
284 kfree(bat_priv->tt_buff);
285 bat_priv->tt_buff_len = 0;
286 bat_priv->tt_buff = NULL;
287 /* We check whether this new OGM has no changes due to size
288 * problems */
289 if (buff_len > 0) {
290 /**
291 * if kmalloc() fails we will reply with the full table
292 * instead of providing the diff
293 */
294 bat_priv->tt_buff = kmalloc(buff_len, GFP_ATOMIC);
295 if (bat_priv->tt_buff) {
296 memcpy(bat_priv->tt_buff, buff, buff_len);
297 bat_priv->tt_buff_len = buff_len;
298 }
299 }
300 spin_unlock_bh(&bat_priv->tt_buff_lock);
226 301
227 /* if we did not get all new local tts see you next time ;-) */ 302 return tot_changes;
228 if (count == bat_priv->num_local_tt)
229 atomic_set(&bat_priv->tt_local_changed, 0);
230
231 spin_unlock_bh(&bat_priv->tt_lhash_lock);
232 return count;
233} 303}
234 304
235int tt_local_seq_print_text(struct seq_file *seq, void *offset) 305int tt_local_seq_print_text(struct seq_file *seq, void *offset)
@@ -261,10 +331,8 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
261 } 331 }
262 332
263 seq_printf(seq, "Locally retrieved addresses (from %s) " 333 seq_printf(seq, "Locally retrieved addresses (from %s) "
264 "announced via TT:\n", 334 "announced via TT (TTVN: %u):\n",
265 net_dev->name); 335 net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn));
266
267 spin_lock_bh(&bat_priv->tt_lhash_lock);
268 336
269 buf_size = 1; 337 buf_size = 1;
270 /* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */ 338 /* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
@@ -279,7 +347,6 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
279 347
280 buff = kmalloc(buf_size, GFP_ATOMIC); 348 buff = kmalloc(buf_size, GFP_ATOMIC);
281 if (!buff) { 349 if (!buff) {
282 spin_unlock_bh(&bat_priv->tt_lhash_lock);
283 ret = -ENOMEM; 350 ret = -ENOMEM;
284 goto out; 351 goto out;
285 } 352 }
@@ -299,8 +366,6 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
299 rcu_read_unlock(); 366 rcu_read_unlock();
300 } 367 }
301 368
302 spin_unlock_bh(&bat_priv->tt_lhash_lock);
303
304 seq_printf(seq, "%s", buff); 369 seq_printf(seq, "%s", buff);
305 kfree(buff); 370 kfree(buff);
306out: 371out:
@@ -309,92 +374,109 @@ out:
309 return ret; 374 return ret;
310} 375}
311 376
312static void _tt_local_del(struct hlist_node *node, void *arg) 377static void tt_local_set_pending(struct bat_priv *bat_priv,
378 struct tt_local_entry *tt_local_entry,
379 uint16_t flags)
313{ 380{
314 struct bat_priv *bat_priv = (struct bat_priv *)arg; 381 tt_local_event(bat_priv, tt_local_entry->addr,
315 void *data = container_of(node, struct tt_local_entry, hash_entry); 382 tt_local_entry->flags | flags);
316 383
 317 kfree(data); 384 /* The local client has to be marked as "pending to be removed" but has
 318 bat_priv->num_local_tt--; 385 * to be kept in the table in order to send it in a full table
 319 atomic_set(&bat_priv->tt_local_changed, 1); 386 * response issued before the next ttvn increment (consistency check) */
387 tt_local_entry->flags |= TT_CLIENT_PENDING;
320} 388}
321 389
322static void tt_local_del(struct bat_priv *bat_priv, 390void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
323 struct tt_local_entry *tt_local_entry, 391 const char *message, bool roaming)
324 char *message)
325{ 392{
326 bat_dbg(DBG_ROUTES, bat_priv, "Deleting local tt entry (%pM): %s\n", 393 struct tt_local_entry *tt_local_entry = NULL;
327 tt_local_entry->addr, message);
328
329 hash_remove(bat_priv->tt_local_hash, compare_ltt, choose_orig,
330 tt_local_entry->addr);
331 _tt_local_del(&tt_local_entry->hash_entry, bat_priv);
332}
333
334void tt_local_remove(struct bat_priv *bat_priv,
335 uint8_t *addr, char *message)
336{
337 struct tt_local_entry *tt_local_entry;
338
339 spin_lock_bh(&bat_priv->tt_lhash_lock);
340 394
341 tt_local_entry = tt_local_hash_find(bat_priv, addr); 395 tt_local_entry = tt_local_hash_find(bat_priv, addr);
396 if (!tt_local_entry)
397 goto out;
342 398
343 if (tt_local_entry) 399 tt_local_set_pending(bat_priv, tt_local_entry, TT_CLIENT_DEL |
344 tt_local_del(bat_priv, tt_local_entry, message); 400 (roaming ? TT_CLIENT_ROAM : NO_FLAGS));
345 401
346 spin_unlock_bh(&bat_priv->tt_lhash_lock); 402 bat_dbg(DBG_TT, bat_priv, "Local tt entry (%pM) pending to be removed: "
403 "%s\n", tt_local_entry->addr, message);
404out:
405 if (tt_local_entry)
406 tt_local_entry_free_ref(tt_local_entry);
347} 407}
348 408
349static void tt_local_purge(struct work_struct *work) 409static void tt_local_purge(struct bat_priv *bat_priv)
350{ 410{
351 struct delayed_work *delayed_work =
352 container_of(work, struct delayed_work, work);
353 struct bat_priv *bat_priv =
354 container_of(delayed_work, struct bat_priv, tt_work);
355 struct hashtable_t *hash = bat_priv->tt_local_hash; 411 struct hashtable_t *hash = bat_priv->tt_local_hash;
356 struct tt_local_entry *tt_local_entry; 412 struct tt_local_entry *tt_local_entry;
357 struct hlist_node *node, *node_tmp; 413 struct hlist_node *node, *node_tmp;
358 struct hlist_head *head; 414 struct hlist_head *head;
359 unsigned long timeout; 415 spinlock_t *list_lock; /* protects write access to the hash lists */
360 int i; 416 int i;
361 417
362 spin_lock_bh(&bat_priv->tt_lhash_lock);
363
364 for (i = 0; i < hash->size; i++) { 418 for (i = 0; i < hash->size; i++) {
365 head = &hash->table[i]; 419 head = &hash->table[i];
420 list_lock = &hash->list_locks[i];
366 421
422 spin_lock_bh(list_lock);
367 hlist_for_each_entry_safe(tt_local_entry, node, node_tmp, 423 hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
368 head, hash_entry) { 424 head, hash_entry) {
369 if (tt_local_entry->never_purge) 425 if (tt_local_entry->flags & TT_CLIENT_NOPURGE)
370 continue; 426 continue;
371 427
372 timeout = tt_local_entry->last_seen; 428 /* entry already marked for deletion */
373 timeout += TT_LOCAL_TIMEOUT * HZ; 429 if (tt_local_entry->flags & TT_CLIENT_PENDING)
430 continue;
374 431
375 if (time_before(jiffies, timeout)) 432 if (!is_out_of_time(tt_local_entry->last_seen,
433 TT_LOCAL_TIMEOUT * 1000))
376 continue; 434 continue;
377 435
378 tt_local_del(bat_priv, tt_local_entry, 436 tt_local_set_pending(bat_priv, tt_local_entry,
379 "address timed out"); 437 TT_CLIENT_DEL);
438 bat_dbg(DBG_TT, bat_priv, "Local tt entry (%pM) "
439 "pending to be removed: timed out\n",
440 tt_local_entry->addr);
380 } 441 }
442 spin_unlock_bh(list_lock);
381 } 443 }
382 444
383 spin_unlock_bh(&bat_priv->tt_lhash_lock);
384 tt_local_start_timer(bat_priv);
385} 445}
386 446
387void tt_local_free(struct bat_priv *bat_priv) 447static void tt_local_table_free(struct bat_priv *bat_priv)
388{ 448{
449 struct hashtable_t *hash;
450 spinlock_t *list_lock; /* protects write access to the hash lists */
451 struct tt_local_entry *tt_local_entry;
452 struct hlist_node *node, *node_tmp;
453 struct hlist_head *head;
454 int i;
455
389 if (!bat_priv->tt_local_hash) 456 if (!bat_priv->tt_local_hash)
390 return; 457 return;
391 458
392 cancel_delayed_work_sync(&bat_priv->tt_work); 459 hash = bat_priv->tt_local_hash;
393 hash_delete(bat_priv->tt_local_hash, _tt_local_del, bat_priv); 460
461 for (i = 0; i < hash->size; i++) {
462 head = &hash->table[i];
463 list_lock = &hash->list_locks[i];
464
465 spin_lock_bh(list_lock);
466 hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
467 head, hash_entry) {
468 hlist_del_rcu(node);
469 tt_local_entry_free_ref(tt_local_entry);
470 }
471 spin_unlock_bh(list_lock);
472 }
473
474 hash_destroy(hash);
475
394 bat_priv->tt_local_hash = NULL; 476 bat_priv->tt_local_hash = NULL;
395} 477}
396 478
397int tt_global_init(struct bat_priv *bat_priv) 479static int tt_global_init(struct bat_priv *bat_priv)
398{ 480{
399 if (bat_priv->tt_global_hash) 481 if (bat_priv->tt_global_hash)
400 return 1; 482 return 1;
@@ -407,74 +489,78 @@ int tt_global_init(struct bat_priv *bat_priv)
407 return 1; 489 return 1;
408} 490}
409 491
410void tt_global_add_orig(struct bat_priv *bat_priv, 492static void tt_changes_list_free(struct bat_priv *bat_priv)
411 struct orig_node *orig_node,
412 unsigned char *tt_buff, int tt_buff_len)
413{ 493{
414 struct tt_global_entry *tt_global_entry; 494 struct tt_change_node *entry, *safe;
415 struct tt_local_entry *tt_local_entry;
416 int tt_buff_count = 0;
417 unsigned char *tt_ptr;
418
419 while ((tt_buff_count + 1) * ETH_ALEN <= tt_buff_len) {
420 spin_lock_bh(&bat_priv->tt_ghash_lock);
421
422 tt_ptr = tt_buff + (tt_buff_count * ETH_ALEN);
423 tt_global_entry = tt_global_hash_find(bat_priv, tt_ptr);
424
425 if (!tt_global_entry) {
426 spin_unlock_bh(&bat_priv->tt_ghash_lock);
427 495
428 tt_global_entry = 496 spin_lock_bh(&bat_priv->tt_changes_list_lock);
429 kmalloc(sizeof(struct tt_global_entry),
430 GFP_ATOMIC);
431 497
432 if (!tt_global_entry) 498 list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
433 break; 499 list) {
500 list_del(&entry->list);
501 kfree(entry);
502 }
434 503
435 memcpy(tt_global_entry->addr, tt_ptr, ETH_ALEN); 504 atomic_set(&bat_priv->tt_local_changes, 0);
505 spin_unlock_bh(&bat_priv->tt_changes_list_lock);
506}
436 507
437 bat_dbg(DBG_ROUTES, bat_priv, 508/* caller must hold orig_node refcount */
438 "Creating new global tt entry: " 509int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
439 "%pM (via %pM)\n", 510 const unsigned char *tt_addr, uint8_t ttvn, bool roaming)
440 tt_global_entry->addr, orig_node->orig); 511{
512 struct tt_global_entry *tt_global_entry;
513 struct orig_node *orig_node_tmp;
514 int ret = 0;
441 515
442 spin_lock_bh(&bat_priv->tt_ghash_lock); 516 tt_global_entry = tt_global_hash_find(bat_priv, tt_addr);
443 hash_add(bat_priv->tt_global_hash, compare_gtt,
444 choose_orig, tt_global_entry,
445 &tt_global_entry->hash_entry);
446 517
447 } 518 if (!tt_global_entry) {
519 tt_global_entry =
520 kmalloc(sizeof(*tt_global_entry),
521 GFP_ATOMIC);
522 if (!tt_global_entry)
523 goto out;
448 524
525 memcpy(tt_global_entry->addr, tt_addr, ETH_ALEN);
526 /* Assign the new orig_node */
527 atomic_inc(&orig_node->refcount);
449 tt_global_entry->orig_node = orig_node; 528 tt_global_entry->orig_node = orig_node;
450 spin_unlock_bh(&bat_priv->tt_ghash_lock); 529 tt_global_entry->ttvn = ttvn;
451 530 tt_global_entry->flags = NO_FLAGS;
452 /* remove address from local hash if present */ 531 tt_global_entry->roam_at = 0;
453 spin_lock_bh(&bat_priv->tt_lhash_lock); 532 atomic_set(&tt_global_entry->refcount, 2);
454 533
455 tt_ptr = tt_buff + (tt_buff_count * ETH_ALEN); 534 hash_add(bat_priv->tt_global_hash, compare_gtt,
456 tt_local_entry = tt_local_hash_find(bat_priv, tt_ptr); 535 choose_orig, tt_global_entry,
457 536 &tt_global_entry->hash_entry);
458 if (tt_local_entry) 537 atomic_inc(&orig_node->tt_size);
459 tt_local_del(bat_priv, tt_local_entry, 538 } else {
460 "global tt received"); 539 if (tt_global_entry->orig_node != orig_node) {
461 540 atomic_dec(&tt_global_entry->orig_node->tt_size);
462 spin_unlock_bh(&bat_priv->tt_lhash_lock); 541 orig_node_tmp = tt_global_entry->orig_node;
463 542 atomic_inc(&orig_node->refcount);
464 tt_buff_count++; 543 tt_global_entry->orig_node = orig_node;
544 orig_node_free_ref(orig_node_tmp);
545 atomic_inc(&orig_node->tt_size);
546 }
547 tt_global_entry->ttvn = ttvn;
548 tt_global_entry->flags = NO_FLAGS;
549 tt_global_entry->roam_at = 0;
465 } 550 }
466 551
467 /* initialize, and overwrite if malloc succeeds */ 552 bat_dbg(DBG_TT, bat_priv,
468 orig_node->tt_buff = NULL; 553 "Creating new global tt entry: %pM (via %pM)\n",
469 orig_node->tt_buff_len = 0; 554 tt_global_entry->addr, orig_node->orig);
470 555
471 if (tt_buff_len > 0) { 556 /* remove address from local hash if present */
472 orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC); 557 tt_local_remove(bat_priv, tt_global_entry->addr,
473 if (orig_node->tt_buff) { 558 "global tt received", roaming);
474 memcpy(orig_node->tt_buff, tt_buff, tt_buff_len); 559 ret = 1;
475 orig_node->tt_buff_len = tt_buff_len; 560out:
476 } 561 if (tt_global_entry)
477 } 562 tt_global_entry_free_ref(tt_global_entry);
563 return ret;
478} 564}
479 565
480int tt_global_seq_print_text(struct seq_file *seq, void *offset) 566int tt_global_seq_print_text(struct seq_file *seq, void *offset)
@@ -508,26 +594,27 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
508 seq_printf(seq, 594 seq_printf(seq,
509 "Globally announced TT entries received via the mesh %s\n", 595 "Globally announced TT entries received via the mesh %s\n",
510 net_dev->name); 596 net_dev->name);
511 597 seq_printf(seq, " %-13s %s %-15s %s\n",
512 spin_lock_bh(&bat_priv->tt_ghash_lock); 598 "Client", "(TTVN)", "Originator", "(Curr TTVN)");
513 599
514 buf_size = 1; 600 buf_size = 1;
515 /* Estimate length for: " * xx:xx:xx:xx:xx:xx via xx:xx:xx:xx:xx:xx\n"*/ 601 /* Estimate length for: " * xx:xx:xx:xx:xx:xx (ttvn) via
602 * xx:xx:xx:xx:xx:xx (cur_ttvn)\n"*/
516 for (i = 0; i < hash->size; i++) { 603 for (i = 0; i < hash->size; i++) {
517 head = &hash->table[i]; 604 head = &hash->table[i];
518 605
519 rcu_read_lock(); 606 rcu_read_lock();
520 __hlist_for_each_rcu(node, head) 607 __hlist_for_each_rcu(node, head)
521 buf_size += 43; 608 buf_size += 59;
522 rcu_read_unlock(); 609 rcu_read_unlock();
523 } 610 }
524 611
525 buff = kmalloc(buf_size, GFP_ATOMIC); 612 buff = kmalloc(buf_size, GFP_ATOMIC);
526 if (!buff) { 613 if (!buff) {
527 spin_unlock_bh(&bat_priv->tt_ghash_lock);
528 ret = -ENOMEM; 614 ret = -ENOMEM;
529 goto out; 615 goto out;
530 } 616 }
617
531 buff[0] = '\0'; 618 buff[0] = '\0';
532 pos = 0; 619 pos = 0;
533 620
@@ -537,16 +624,18 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
537 rcu_read_lock(); 624 rcu_read_lock();
538 hlist_for_each_entry_rcu(tt_global_entry, node, 625 hlist_for_each_entry_rcu(tt_global_entry, node,
539 head, hash_entry) { 626 head, hash_entry) {
540 pos += snprintf(buff + pos, 44, 627 pos += snprintf(buff + pos, 61,
541 " * %pM via %pM\n", 628 " * %pM (%3u) via %pM (%3u)\n",
542 tt_global_entry->addr, 629 tt_global_entry->addr,
543 tt_global_entry->orig_node->orig); 630 tt_global_entry->ttvn,
631 tt_global_entry->orig_node->orig,
632 (uint8_t) atomic_read(
633 &tt_global_entry->orig_node->
634 last_ttvn));
544 } 635 }
545 rcu_read_unlock(); 636 rcu_read_unlock();
546 } 637 }
547 638
548 spin_unlock_bh(&bat_priv->tt_ghash_lock);
549
550 seq_printf(seq, "%s", buff); 639 seq_printf(seq, "%s", buff);
551 kfree(buff); 640 kfree(buff);
552out: 641out:
@@ -555,84 +644,1099 @@ out:
555 return ret; 644 return ret;
556} 645}
557 646
558static void _tt_global_del_orig(struct bat_priv *bat_priv, 647static void _tt_global_del(struct bat_priv *bat_priv,
559 struct tt_global_entry *tt_global_entry, 648 struct tt_global_entry *tt_global_entry,
560 char *message) 649 const char *message)
561{ 650{
562 bat_dbg(DBG_ROUTES, bat_priv, 651 if (!tt_global_entry)
652 goto out;
653
654 bat_dbg(DBG_TT, bat_priv,
563 "Deleting global tt entry %pM (via %pM): %s\n", 655 "Deleting global tt entry %pM (via %pM): %s\n",
564 tt_global_entry->addr, tt_global_entry->orig_node->orig, 656 tt_global_entry->addr, tt_global_entry->orig_node->orig,
565 message); 657 message);
566 658
659 atomic_dec(&tt_global_entry->orig_node->tt_size);
660
567 hash_remove(bat_priv->tt_global_hash, compare_gtt, choose_orig, 661 hash_remove(bat_priv->tt_global_hash, compare_gtt, choose_orig,
568 tt_global_entry->addr); 662 tt_global_entry->addr);
569 kfree(tt_global_entry); 663out:
664 if (tt_global_entry)
665 tt_global_entry_free_ref(tt_global_entry);
666}
667
668void tt_global_del(struct bat_priv *bat_priv,
669 struct orig_node *orig_node, const unsigned char *addr,
670 const char *message, bool roaming)
671{
672 struct tt_global_entry *tt_global_entry = NULL;
673
674 tt_global_entry = tt_global_hash_find(bat_priv, addr);
675 if (!tt_global_entry)
676 goto out;
677
678 if (tt_global_entry->orig_node == orig_node) {
679 if (roaming) {
680 tt_global_entry->flags |= TT_CLIENT_ROAM;
681 tt_global_entry->roam_at = jiffies;
682 goto out;
683 }
684 _tt_global_del(bat_priv, tt_global_entry, message);
685 }
686out:
687 if (tt_global_entry)
688 tt_global_entry_free_ref(tt_global_entry);
570} 689}
571 690
572void tt_global_del_orig(struct bat_priv *bat_priv, 691void tt_global_del_orig(struct bat_priv *bat_priv,
573 struct orig_node *orig_node, char *message) 692 struct orig_node *orig_node, const char *message)
574{ 693{
575 struct tt_global_entry *tt_global_entry; 694 struct tt_global_entry *tt_global_entry;
576 int tt_buff_count = 0; 695 int i;
577 unsigned char *tt_ptr; 696 struct hashtable_t *hash = bat_priv->tt_global_hash;
697 struct hlist_node *node, *safe;
698 struct hlist_head *head;
699 spinlock_t *list_lock; /* protects write access to the hash lists */
578 700
579 if (orig_node->tt_buff_len == 0) 701 if (!hash)
580 return; 702 return;
581 703
582 spin_lock_bh(&bat_priv->tt_ghash_lock); 704 for (i = 0; i < hash->size; i++) {
583 705 head = &hash->table[i];
584 while ((tt_buff_count + 1) * ETH_ALEN <= orig_node->tt_buff_len) { 706 list_lock = &hash->list_locks[i];
585 tt_ptr = orig_node->tt_buff + (tt_buff_count * ETH_ALEN);
586 tt_global_entry = tt_global_hash_find(bat_priv, tt_ptr);
587
588 if ((tt_global_entry) &&
589 (tt_global_entry->orig_node == orig_node))
590 _tt_global_del_orig(bat_priv, tt_global_entry,
591 message);
592 707
593 tt_buff_count++; 708 spin_lock_bh(list_lock);
709 hlist_for_each_entry_safe(tt_global_entry, node, safe,
710 head, hash_entry) {
711 if (tt_global_entry->orig_node == orig_node) {
712 bat_dbg(DBG_TT, bat_priv,
713 "Deleting global tt entry %pM "
714 "(via %pM): originator time out\n",
715 tt_global_entry->addr,
716 tt_global_entry->orig_node->orig);
717 hlist_del_rcu(node);
718 tt_global_entry_free_ref(tt_global_entry);
719 }
720 }
721 spin_unlock_bh(list_lock);
594 } 722 }
595 723 atomic_set(&orig_node->tt_size, 0);
596 spin_unlock_bh(&bat_priv->tt_ghash_lock);
597
598 orig_node->tt_buff_len = 0;
599 kfree(orig_node->tt_buff);
600 orig_node->tt_buff = NULL;
601} 724}
602 725
603static void tt_global_del(struct hlist_node *node, void *arg) 726static void tt_global_roam_purge(struct bat_priv *bat_priv)
604{ 727{
605 void *data = container_of(node, struct tt_global_entry, hash_entry); 728 struct hashtable_t *hash = bat_priv->tt_global_hash;
729 struct tt_global_entry *tt_global_entry;
730 struct hlist_node *node, *node_tmp;
731 struct hlist_head *head;
732 spinlock_t *list_lock; /* protects write access to the hash lists */
733 int i;
734
735 for (i = 0; i < hash->size; i++) {
736 head = &hash->table[i];
737 list_lock = &hash->list_locks[i];
738
739 spin_lock_bh(list_lock);
740 hlist_for_each_entry_safe(tt_global_entry, node, node_tmp,
741 head, hash_entry) {
742 if (!(tt_global_entry->flags & TT_CLIENT_ROAM))
743 continue;
744 if (!is_out_of_time(tt_global_entry->roam_at,
745 TT_CLIENT_ROAM_TIMEOUT * 1000))
746 continue;
747
748 bat_dbg(DBG_TT, bat_priv, "Deleting global "
749 "tt entry (%pM): Roaming timeout\n",
750 tt_global_entry->addr);
751 atomic_dec(&tt_global_entry->orig_node->tt_size);
752 hlist_del_rcu(node);
753 tt_global_entry_free_ref(tt_global_entry);
754 }
755 spin_unlock_bh(list_lock);
756 }
606 757
607 kfree(data);
608} 758}
609 759
610void tt_global_free(struct bat_priv *bat_priv) 760static void tt_global_table_free(struct bat_priv *bat_priv)
611{ 761{
762 struct hashtable_t *hash;
763 spinlock_t *list_lock; /* protects write access to the hash lists */
764 struct tt_global_entry *tt_global_entry;
765 struct hlist_node *node, *node_tmp;
766 struct hlist_head *head;
767 int i;
768
612 if (!bat_priv->tt_global_hash) 769 if (!bat_priv->tt_global_hash)
613 return; 770 return;
614 771
615 hash_delete(bat_priv->tt_global_hash, tt_global_del, NULL); 772 hash = bat_priv->tt_global_hash;
773
774 for (i = 0; i < hash->size; i++) {
775 head = &hash->table[i];
776 list_lock = &hash->list_locks[i];
777
778 spin_lock_bh(list_lock);
779 hlist_for_each_entry_safe(tt_global_entry, node, node_tmp,
780 head, hash_entry) {
781 hlist_del_rcu(node);
782 tt_global_entry_free_ref(tt_global_entry);
783 }
784 spin_unlock_bh(list_lock);
785 }
786
787 hash_destroy(hash);
788
616 bat_priv->tt_global_hash = NULL; 789 bat_priv->tt_global_hash = NULL;
617} 790}
618 791
619struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr) 792struct orig_node *transtable_search(struct bat_priv *bat_priv,
793 const uint8_t *addr)
620{ 794{
621 struct tt_global_entry *tt_global_entry; 795 struct tt_global_entry *tt_global_entry;
622 struct orig_node *orig_node = NULL; 796 struct orig_node *orig_node = NULL;
623 797
624 spin_lock_bh(&bat_priv->tt_ghash_lock);
625 tt_global_entry = tt_global_hash_find(bat_priv, addr); 798 tt_global_entry = tt_global_hash_find(bat_priv, addr);
626 799
627 if (!tt_global_entry) 800 if (!tt_global_entry)
628 goto out; 801 goto out;
629 802
630 if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount)) 803 if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount))
631 goto out; 804 goto free_tt;
805
806 /* A global client marked as PENDING has already moved from that
807 * originator */
808 if (tt_global_entry->flags & TT_CLIENT_PENDING)
809 goto free_tt;
632 810
633 orig_node = tt_global_entry->orig_node; 811 orig_node = tt_global_entry->orig_node;
634 812
813free_tt:
814 tt_global_entry_free_ref(tt_global_entry);
635out: 815out:
636 spin_unlock_bh(&bat_priv->tt_ghash_lock);
637 return orig_node; 816 return orig_node;
638} 817}
818
819/* Calculates the checksum of the local table of a given orig_node */
820uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node)
821{
822 uint16_t total = 0, total_one;
823 struct hashtable_t *hash = bat_priv->tt_global_hash;
824 struct tt_global_entry *tt_global_entry;
825 struct hlist_node *node;
826 struct hlist_head *head;
827 int i, j;
828
829 for (i = 0; i < hash->size; i++) {
830 head = &hash->table[i];
831
832 rcu_read_lock();
833 hlist_for_each_entry_rcu(tt_global_entry, node,
834 head, hash_entry) {
835 if (compare_eth(tt_global_entry->orig_node,
836 orig_node)) {
837 /* Roaming clients are in the global table for
838 * consistency only. They don't have to be
839 * taken into account while computing the
840 * global crc */
841 if (tt_global_entry->flags & TT_CLIENT_ROAM)
842 continue;
843 total_one = 0;
844 for (j = 0; j < ETH_ALEN; j++)
845 total_one = crc16_byte(total_one,
846 tt_global_entry->addr[j]);
847 total ^= total_one;
848 }
849 }
850 rcu_read_unlock();
851 }
852
853 return total;
854}
855
856/* Calculates the checksum of the local table */
857uint16_t tt_local_crc(struct bat_priv *bat_priv)
858{
859 uint16_t total = 0, total_one;
860 struct hashtable_t *hash = bat_priv->tt_local_hash;
861 struct tt_local_entry *tt_local_entry;
862 struct hlist_node *node;
863 struct hlist_head *head;
864 int i, j;
865
866 for (i = 0; i < hash->size; i++) {
867 head = &hash->table[i];
868
869 rcu_read_lock();
870 hlist_for_each_entry_rcu(tt_local_entry, node,
871 head, hash_entry) {
872 /* not yet committed clients have not to be taken into
873 * account while computing the CRC */
874 if (tt_local_entry->flags & TT_CLIENT_NEW)
875 continue;
876 total_one = 0;
877 for (j = 0; j < ETH_ALEN; j++)
878 total_one = crc16_byte(total_one,
879 tt_local_entry->addr[j]);
880 total ^= total_one;
881 }
882 rcu_read_unlock();
883 }
884
885 return total;
886}
887
888static void tt_req_list_free(struct bat_priv *bat_priv)
889{
890 struct tt_req_node *node, *safe;
891
892 spin_lock_bh(&bat_priv->tt_req_list_lock);
893
894 list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
895 list_del(&node->list);
896 kfree(node);
897 }
898
899 spin_unlock_bh(&bat_priv->tt_req_list_lock);
900}
901
902void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node,
903 const unsigned char *tt_buff, uint8_t tt_num_changes)
904{
905 uint16_t tt_buff_len = tt_len(tt_num_changes);
906
907 /* Replace the old buffer only if I received something in the
908 * last OGM (the OGM could carry no changes) */
909 spin_lock_bh(&orig_node->tt_buff_lock);
910 if (tt_buff_len > 0) {
911 kfree(orig_node->tt_buff);
912 orig_node->tt_buff_len = 0;
913 orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC);
914 if (orig_node->tt_buff) {
915 memcpy(orig_node->tt_buff, tt_buff, tt_buff_len);
916 orig_node->tt_buff_len = tt_buff_len;
917 }
918 }
919 spin_unlock_bh(&orig_node->tt_buff_lock);
920}
921
922static void tt_req_purge(struct bat_priv *bat_priv)
923{
924 struct tt_req_node *node, *safe;
925
926 spin_lock_bh(&bat_priv->tt_req_list_lock);
927 list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
928 if (is_out_of_time(node->issued_at,
929 TT_REQUEST_TIMEOUT * 1000)) {
930 list_del(&node->list);
931 kfree(node);
932 }
933 }
934 spin_unlock_bh(&bat_priv->tt_req_list_lock);
935}
936
937/* returns the pointer to the new tt_req_node struct if no request
938 * has already been issued for this orig_node, NULL otherwise */
939static struct tt_req_node *new_tt_req_node(struct bat_priv *bat_priv,
940 struct orig_node *orig_node)
941{
942 struct tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;
943
944 spin_lock_bh(&bat_priv->tt_req_list_lock);
945 list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) {
946 if (compare_eth(tt_req_node_tmp, orig_node) &&
947 !is_out_of_time(tt_req_node_tmp->issued_at,
948 TT_REQUEST_TIMEOUT * 1000))
949 goto unlock;
950 }
951
952 tt_req_node = kmalloc(sizeof(*tt_req_node), GFP_ATOMIC);
953 if (!tt_req_node)
954 goto unlock;
955
956 memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN);
957 tt_req_node->issued_at = jiffies;
958
959 list_add(&tt_req_node->list, &bat_priv->tt_req_list);
960unlock:
961 spin_unlock_bh(&bat_priv->tt_req_list_lock);
962 return tt_req_node;
963}
964
965/* data_ptr is useless here, but has to be kept to respect the prototype */
966static int tt_local_valid_entry(const void *entry_ptr, const void *data_ptr)
967{
968 const struct tt_local_entry *tt_local_entry = entry_ptr;
969
970 if (tt_local_entry->flags & TT_CLIENT_NEW)
971 return 0;
972 return 1;
973}
974
975static int tt_global_valid_entry(const void *entry_ptr, const void *data_ptr)
976{
977 const struct tt_global_entry *tt_global_entry = entry_ptr;
978 const struct orig_node *orig_node = data_ptr;
979
980 if (tt_global_entry->flags & TT_CLIENT_ROAM)
981 return 0;
982
983 return (tt_global_entry->orig_node == orig_node);
984}
985
/* Build a TT_RESPONSE skb carrying up to tt_len bytes of tt_change entries
 * taken from @hash, filtered through @valid_cb (entries for which the
 * callback returns 0 are skipped; a NULL callback accepts everything).
 * Only the ttvn and tt_data fields are filled in here; the caller completes
 * the remaining packet header fields.
 * Returns the skb, or NULL on allocation failure. */
static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
					      struct hashtable_t *hash,
					      struct hard_iface *primary_if,
					      int (*valid_cb)(const void *,
							      const void *),
					      void *cb_data)
{
	struct tt_local_entry *tt_local_entry;
	struct tt_query_packet *tt_response;
	struct tt_change *tt_change;
	struct hlist_node *node;
	struct hlist_head *head;
	struct sk_buff *skb = NULL;
	uint16_t tt_tot, tt_count;
	ssize_t tt_query_size = sizeof(struct tt_query_packet);
	int i;

	/* clamp the payload to the soft-interface MTU, keeping it a whole
	 * multiple of sizeof(struct tt_change) */
	if (tt_query_size + tt_len > primary_if->soft_iface->mtu) {
		tt_len = primary_if->soft_iface->mtu - tt_query_size;
		tt_len -= tt_len % sizeof(struct tt_change);
	}
	tt_tot = tt_len / sizeof(struct tt_change);

	skb = dev_alloc_skb(tt_query_size + tt_len + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);
	tt_response = (struct tt_query_packet *)skb_put(skb,
						tt_query_size + tt_len);
	tt_response->ttvn = ttvn;

	/* the change records start right after the query header */
	tt_change = (struct tt_change *)(skb->data + tt_query_size);
	tt_count = 0;

	rcu_read_lock();
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry_rcu(tt_local_entry, node,
					 head, hash_entry) {
			/* NOTE(review): this break only leaves the current
			 * bucket; the outer loop keeps iterating (harmless,
			 * the check merely fires once per remaining bucket) */
			if (tt_count == tt_tot)
				break;

			if ((valid_cb) && (!valid_cb(tt_local_entry, cb_data)))
				continue;

			memcpy(tt_change->addr, tt_local_entry->addr, ETH_ALEN);
			tt_change->flags = NO_FLAGS;

			tt_count++;
			tt_change++;
		}
	}
	rcu_read_unlock();

	/* store in the message the number of entries we have successfully
	 * copied */
	tt_response->tt_data = htons(tt_count);

out:
	return skb;
}
1049
/* Issue a unicast TT_REQUEST towards @dst_orig_node asking for table
 * version @ttvn (the full table when @full_table is set).  At most one
 * request per originator is kept pending - see new_tt_req_node().
 * Returns 0 on success, 1 on failure. */
int send_tt_request(struct bat_priv *bat_priv, struct orig_node *dst_orig_node,
		    uint8_t ttvn, uint16_t tt_crc, bool full_table)
{
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_request;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if;
	struct tt_req_node *tt_req_node = NULL;
	int ret = 1;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* The new tt_req will be issued only if I'm not waiting for a
	 * reply from the same orig_node yet */
	tt_req_node = new_tt_req_node(bat_priv, dst_orig_node);
	if (!tt_req_node)
		goto out;

	skb = dev_alloc_skb(sizeof(struct tt_query_packet) + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);

	tt_request = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet));

	tt_request->packet_type = BAT_TT_QUERY;
	tt_request->version = COMPAT_VERSION;
	memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
	tt_request->ttl = TTL;
	tt_request->ttvn = ttvn;
	/* NOTE(review): tt_crc is stored without htons(); the receiver in
	 * send_other_tt_response() compares it raw against its own crc, so
	 * this is self-consistent but not endian-portable - confirm against
	 * the intended on-wire format */
	tt_request->tt_data = tt_crc;
	tt_request->flags = TT_REQUEST;

	if (full_table)
		tt_request->flags |= TT_FULL_TABLE;

	neigh_node = orig_node_get_router(dst_orig_node);
	if (!neigh_node)
		goto out;

	bat_dbg(DBG_TT, bat_priv, "Sending TT_REQUEST to %pM via %pM "
		"[%c]\n", dst_orig_node->orig, neigh_node->addr,
		(full_table ? 'F' : '.'));

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = 0;

out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (primary_if)
		hardif_free_ref(primary_if);
	if (ret)
		kfree_skb(skb);
	/* on failure, withdraw the pending-request marker so a later retry
	 * towards the same originator is not suppressed */
	if (ret && tt_req_node) {
		spin_lock_bh(&bat_priv->tt_req_list_lock);
		list_del(&tt_req_node->list);
		spin_unlock_bh(&bat_priv->tt_req_list_lock);
		kfree(tt_req_node);
	}
	return ret;
}
1117
/* Answer a TT_REQUEST whose destination is another originator on whose
 * behalf we hold translation-table data.  Replies with either the cached
 * OGM diff buffer or, when a full table was requested (or no buffer
 * exists), a freshly built global-table dump for req_dst_orig_node.
 * Returns true when a response was sent; on failure the allocated skb
 * (if any) is freed here. */
static bool send_other_tt_response(struct bat_priv *bat_priv,
				   struct tt_query_packet *tt_request)
{
	struct orig_node *req_dst_orig_node = NULL, *res_dst_orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if = NULL;
	uint8_t orig_ttvn, req_ttvn, ttvn;
	int ret = false;
	unsigned char *tt_buff;
	bool full_table;
	uint16_t tt_len, tt_tot;
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_response;

	bat_dbg(DBG_TT, bat_priv,
		"Received TT_REQUEST from %pM for "
		"ttvn: %u (%pM) [%c]\n", tt_request->src,
		tt_request->ttvn, tt_request->dst,
		(tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));

	/* Let's get the orig node of the REAL destination */
	req_dst_orig_node = get_orig_node(bat_priv, tt_request->dst);
	if (!req_dst_orig_node)
		goto out;

	res_dst_orig_node = get_orig_node(bat_priv, tt_request->src);
	if (!res_dst_orig_node)
		goto out;

	neigh_node = orig_node_get_router(res_dst_orig_node);
	if (!neigh_node)
		goto out;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
	req_ttvn = tt_request->ttvn;

	/* I have not the requested data */
	if (orig_ttvn != req_ttvn ||
	    tt_request->tt_data != req_dst_orig_node->tt_crc)
		goto out;

	/* If it has explicitly been requested the full table */
	if (tt_request->flags & TT_FULL_TABLE ||
	    !req_dst_orig_node->tt_buff)
		full_table = true;
	else
		full_table = false;

	/* In this version, fragmentation is not implemented, then
	 * I'll send only one packet with as much TT entries as I can */
	if (!full_table) {
		/* tt_buff_lock held across the copy so the cached diff
		 * cannot change underneath us */
		spin_lock_bh(&req_dst_orig_node->tt_buff_lock);
		tt_len = req_dst_orig_node->tt_buff_len;
		tt_tot = tt_len / sizeof(struct tt_change);

		skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
				    tt_len + ETH_HLEN);
		if (!skb)
			goto unlock;

		skb_reserve(skb, ETH_HLEN);
		tt_response = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet) + tt_len);
		tt_response->ttvn = req_ttvn;
		tt_response->tt_data = htons(tt_tot);

		tt_buff = skb->data + sizeof(struct tt_query_packet);
		/* Copy the last orig_node's OGM buffer */
		memcpy(tt_buff, req_dst_orig_node->tt_buff,
		       req_dst_orig_node->tt_buff_len);

		spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
	} else {
		tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size) *
						sizeof(struct tt_change);
		ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);

		skb = tt_response_fill_table(tt_len, ttvn,
					     bat_priv->tt_global_hash,
					     primary_if, tt_global_valid_entry,
					     req_dst_orig_node);
		if (!skb)
			goto out;

		tt_response = (struct tt_query_packet *)skb->data;
	}

	tt_response->packet_type = BAT_TT_QUERY;
	tt_response->version = COMPAT_VERSION;
	tt_response->ttl = TTL;
	/* the response is sourced on behalf of the real destination */
	memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN);
	memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
	tt_response->flags = TT_RESPONSE;

	if (full_table)
		tt_response->flags |= TT_FULL_TABLE;

	bat_dbg(DBG_TT, bat_priv,
		"Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n",
		res_dst_orig_node->orig, neigh_node->addr,
		req_dst_orig_node->orig, req_ttvn);

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = true;
	goto out;

unlock:
	spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);

out:
	if (res_dst_orig_node)
		orig_node_free_ref(res_dst_orig_node);
	if (req_dst_orig_node)
		orig_node_free_ref(req_dst_orig_node);
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (primary_if)
		hardif_free_ref(primary_if);
	if (!ret)
		kfree_skb(skb);
	return ret;

}
/* Answer a TT_REQUEST addressed to this node, using either the cached
 * local diff buffer or a freshly built full local table (when explicitly
 * requested, when the requested ttvn differs from ours, or when no buffer
 * exists).  Always returns true: the request was for us, so the caller
 * must not re-route it even if answering failed. */
static bool send_my_tt_response(struct bat_priv *bat_priv,
				struct tt_query_packet *tt_request)
{
	struct orig_node *orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if = NULL;
	uint8_t my_ttvn, req_ttvn, ttvn;
	int ret = false;
	unsigned char *tt_buff;
	bool full_table;
	uint16_t tt_len, tt_tot;
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_response;

	bat_dbg(DBG_TT, bat_priv,
		"Received TT_REQUEST from %pM for "
		"ttvn: %u (me) [%c]\n", tt_request->src,
		tt_request->ttvn,
		(tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));


	my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
	req_ttvn = tt_request->ttvn;

	orig_node = get_orig_node(bat_priv, tt_request->src);
	if (!orig_node)
		goto out;

	neigh_node = orig_node_get_router(orig_node);
	if (!neigh_node)
		goto out;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* If the full table has been explicitly requested or the gap
	 * is too big send the whole local translation table */
	if (tt_request->flags & TT_FULL_TABLE || my_ttvn != req_ttvn ||
	    !bat_priv->tt_buff)
		full_table = true;
	else
		full_table = false;

	/* In this version, fragmentation is not implemented, then
	 * I'll send only one packet with as much TT entries as I can */
	if (!full_table) {
		/* hold tt_buff_lock across the copy so OGM processing cannot
		 * swap the buffer underneath us */
		spin_lock_bh(&bat_priv->tt_buff_lock);
		tt_len = bat_priv->tt_buff_len;
		tt_tot = tt_len / sizeof(struct tt_change);

		skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
				    tt_len + ETH_HLEN);
		if (!skb)
			goto unlock;

		skb_reserve(skb, ETH_HLEN);
		tt_response = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet) + tt_len);
		tt_response->ttvn = req_ttvn;
		tt_response->tt_data = htons(tt_tot);

		tt_buff = skb->data + sizeof(struct tt_query_packet);
		memcpy(tt_buff, bat_priv->tt_buff,
		       bat_priv->tt_buff_len);
		spin_unlock_bh(&bat_priv->tt_buff_lock);
	} else {
		tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt) *
						sizeof(struct tt_change);
		ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);

		skb = tt_response_fill_table(tt_len, ttvn,
					     bat_priv->tt_local_hash,
					     primary_if, tt_local_valid_entry,
					     NULL);
		if (!skb)
			goto out;

		tt_response = (struct tt_query_packet *)skb->data;
	}

	tt_response->packet_type = BAT_TT_QUERY;
	tt_response->version = COMPAT_VERSION;
	tt_response->ttl = TTL;
	memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
	tt_response->flags = TT_RESPONSE;

	if (full_table)
		tt_response->flags |= TT_FULL_TABLE;

	bat_dbg(DBG_TT, bat_priv,
		"Sending TT_RESPONSE to %pM via %pM [%c]\n",
		orig_node->orig, neigh_node->addr,
		(tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = true;
	goto out;

unlock:
	spin_unlock_bh(&bat_priv->tt_buff_lock);
out:
	if (orig_node)
		orig_node_free_ref(orig_node);
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (primary_if)
		hardif_free_ref(primary_if);
	/* ret is only used to decide whether the skb must be freed */
	if (!ret)
		kfree_skb(skb);
	/* This packet was for me, so it doesn't need to be re-routed */
	return true;
}
1359
1360bool send_tt_response(struct bat_priv *bat_priv,
1361 struct tt_query_packet *tt_request)
1362{
1363 if (is_my_mac(tt_request->dst))
1364 return send_my_tt_response(bat_priv, tt_request);
1365 else
1366 return send_other_tt_response(bat_priv, tt_request);
1367}
1368
1369static void _tt_update_changes(struct bat_priv *bat_priv,
1370 struct orig_node *orig_node,
1371 struct tt_change *tt_change,
1372 uint16_t tt_num_changes, uint8_t ttvn)
1373{
1374 int i;
1375
1376 for (i = 0; i < tt_num_changes; i++) {
1377 if ((tt_change + i)->flags & TT_CLIENT_DEL)
1378 tt_global_del(bat_priv, orig_node,
1379 (tt_change + i)->addr,
1380 "tt removed by changes",
1381 (tt_change + i)->flags & TT_CLIENT_ROAM);
1382 else
1383 if (!tt_global_add(bat_priv, orig_node,
1384 (tt_change + i)->addr, ttvn, false))
1385 /* In case of problem while storing a
1386 * global_entry, we stop the updating
1387 * procedure without committing the
1388 * ttvn change. This will avoid to send
1389 * corrupted data on tt_request
1390 */
1391 return;
1392 }
1393}
1394
/* Install a full-table TT_RESPONSE: drop everything previously learned
 * from tt_response->src, insert the carried entries, invalidate the cached
 * diff buffer and adopt the advertised ttvn. */
static void tt_fill_gtable(struct bat_priv *bat_priv,
			   struct tt_query_packet *tt_response)
{
	struct orig_node *orig_node = NULL;

	orig_node = orig_hash_find(bat_priv, tt_response->src);
	if (!orig_node)
		goto out;

	/* Purge the old table first.. */
	tt_global_del_orig(bat_priv, orig_node, "Received full table");

	/* the change records immediately follow the query header */
	_tt_update_changes(bat_priv, orig_node,
			   (struct tt_change *)(tt_response + 1),
			   tt_response->tt_data, tt_response->ttvn);

	/* the cached buffer described the pre-replacement table: drop it */
	spin_lock_bh(&orig_node->tt_buff_lock);
	kfree(orig_node->tt_buff);
	orig_node->tt_buff_len = 0;
	orig_node->tt_buff = NULL;
	spin_unlock_bh(&orig_node->tt_buff_lock);

	atomic_set(&orig_node->last_ttvn, tt_response->ttvn);

out:
	if (orig_node)
		orig_node_free_ref(orig_node);
}
1423
/* Apply @tt_num_changes incremental changes announced by @orig_node, cache
 * the raw change buffer (to answer TT_REQUESTs on its behalf later) and
 * commit the new @ttvn.
 * NOTE(review): _tt_update_changes() may stop early on a failed insertion,
 * yet the buffer and ttvn are committed here regardless - its comment
 * suggests the ttvn should not be committed in that case; confirm. */
void tt_update_changes(struct bat_priv *bat_priv, struct orig_node *orig_node,
		       uint16_t tt_num_changes, uint8_t ttvn,
		       struct tt_change *tt_change)
{
	_tt_update_changes(bat_priv, orig_node, tt_change, tt_num_changes,
			   ttvn);

	tt_save_orig_buffer(bat_priv, orig_node, (unsigned char *)tt_change,
			    tt_num_changes);
	atomic_set(&orig_node->last_ttvn, ttvn);
}
1435
1436bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr)
1437{
1438 struct tt_local_entry *tt_local_entry = NULL;
1439 bool ret = false;
1440
1441 tt_local_entry = tt_local_hash_find(bat_priv, addr);
1442 if (!tt_local_entry)
1443 goto out;
1444 /* Check if the client has been logically deleted (but is kept for
1445 * consistency purpose) */
1446 if (tt_local_entry->flags & TT_CLIENT_PENDING)
1447 goto out;
1448 ret = true;
1449out:
1450 if (tt_local_entry)
1451 tt_local_entry_free_ref(tt_local_entry);
1452 return ret;
1453}
1454
/* Process an incoming TT_RESPONSE: apply the carried table (full replace
 * via tt_fill_gtable() or incremental changes), remove the matching entry
 * from the pending tt_req list and recompute the originator's CRC.
 * NOTE(review): tt_data is consumed in host byte order here - presumably
 * the receive path already converted it with ntohs(); confirm. */
void handle_tt_response(struct bat_priv *bat_priv,
			struct tt_query_packet *tt_response)
{
	struct tt_req_node *node, *safe;
	struct orig_node *orig_node = NULL;

	bat_dbg(DBG_TT, bat_priv, "Received TT_RESPONSE from %pM for "
		"ttvn %d t_size: %d [%c]\n",
		tt_response->src, tt_response->ttvn,
		tt_response->tt_data,
		(tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));

	orig_node = orig_hash_find(bat_priv, tt_response->src);
	if (!orig_node)
		goto out;

	if (tt_response->flags & TT_FULL_TABLE)
		tt_fill_gtable(bat_priv, tt_response);
	else
		tt_update_changes(bat_priv, orig_node, tt_response->tt_data,
				  tt_response->ttvn,
				  (struct tt_change *)(tt_response + 1));

	/* Delete the tt_req_node from pending tt_requests list */
	spin_lock_bh(&bat_priv->tt_req_list_lock);
	list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
		if (!compare_eth(node->addr, tt_response->src))
			continue;
		list_del(&node->list);
		kfree(node);
	}
	spin_unlock_bh(&bat_priv->tt_req_list_lock);

	/* Recalculate the CRC for this orig_node and store it */
	orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
	/* Roaming phase is over: tables are in sync again. I can
	 * unset the flag */
	orig_node->tt_poss_change = false;
out:
	if (orig_node)
		orig_node_free_ref(orig_node);
}
1497
/* Set up the local and global translation tables and arm the periodic
 * purge worker.  Returns 1 on success, 0 if either table could not be
 * initialised. */
int tt_init(struct bat_priv *bat_priv)
{
	if (!tt_local_init(bat_priv) || !tt_global_init(bat_priv))
		return 0;

	tt_start_timer(bat_priv);
	return 1;
}
1510
1511static void tt_roam_list_free(struct bat_priv *bat_priv)
1512{
1513 struct tt_roam_node *node, *safe;
1514
1515 spin_lock_bh(&bat_priv->tt_roam_list_lock);
1516
1517 list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
1518 list_del(&node->list);
1519 kfree(node);
1520 }
1521
1522 spin_unlock_bh(&bat_priv->tt_roam_list_lock);
1523}
1524
1525static void tt_roam_purge(struct bat_priv *bat_priv)
1526{
1527 struct tt_roam_node *node, *safe;
1528
1529 spin_lock_bh(&bat_priv->tt_roam_list_lock);
1530 list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
1531 if (!is_out_of_time(node->first_time,
1532 ROAMING_MAX_TIME * 1000))
1533 continue;
1534
1535 list_del(&node->list);
1536 kfree(node);
1537 }
1538 spin_unlock_bh(&bat_priv->tt_roam_list_lock);
1539}
1540
/* This function checks whether the client already reached the
 * maximum number of possible roaming phases. In this case the ROAMING_ADV
 * will not be sent.
 *
 * returns true if the ROAMING_ADV can be sent, false otherwise */
static bool tt_check_roam_count(struct bat_priv *bat_priv,
				uint8_t *client)
{
	struct tt_roam_node *tt_roam_node;
	bool ret = false;

	spin_lock_bh(&bat_priv->tt_roam_list_lock);
	/* The new tt_req will be issued only if I'm not waiting for a
	 * reply from the same orig_node yet */
	list_for_each_entry(tt_roam_node, &bat_priv->tt_roam_list, list) {
		if (!compare_eth(tt_roam_node->addr, client))
			continue;

		/* stale entries are skipped here; tt_roam_purge() reclaims
		 * them later */
		if (is_out_of_time(tt_roam_node->first_time,
				   ROAMING_MAX_TIME * 1000))
			continue;

		if (!atomic_dec_not_zero(&tt_roam_node->counter))
			/* Sorry, you roamed too many times! */
			goto unlock;
		ret = true;
		break;
	}

	/* no live entry found for this client: open a fresh roaming window
	 * with ROAMING_MAX_COUNT - 1 events left (this one counts) */
	if (!ret) {
		tt_roam_node = kmalloc(sizeof(*tt_roam_node), GFP_ATOMIC);
		if (!tt_roam_node)
			goto unlock;

		tt_roam_node->first_time = jiffies;
		atomic_set(&tt_roam_node->counter, ROAMING_MAX_COUNT - 1);
		memcpy(tt_roam_node->addr, client, ETH_ALEN);

		list_add(&tt_roam_node->list, &bat_priv->tt_roam_list);
		ret = true;
	}

unlock:
	spin_unlock_bh(&bat_priv->tt_roam_list_lock);
	return ret;
}
1587
/* Announce to @orig_node that @client has roamed to this node, so it can
 * keep forwarding the client's traffic until the tables converge.  The
 * advertisement is suppressed when the client exhausted its roaming budget
 * (tt_check_roam_count()).  The skb is freed on any failure path. */
void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
		   struct orig_node *orig_node)
{
	struct neigh_node *neigh_node = NULL;
	struct sk_buff *skb = NULL;
	struct roam_adv_packet *roam_adv_packet;
	int ret = 1;
	struct hard_iface *primary_if;

	/* before going on we have to check whether the client has
	 * already roamed to us too many times */
	if (!tt_check_roam_count(bat_priv, client))
		goto out;

	skb = dev_alloc_skb(sizeof(struct roam_adv_packet) + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);

	roam_adv_packet = (struct roam_adv_packet *)skb_put(skb,
					sizeof(struct roam_adv_packet));

	roam_adv_packet->packet_type = BAT_ROAM_ADV;
	roam_adv_packet->version = COMPAT_VERSION;
	roam_adv_packet->ttl = TTL;
	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;
	/* the primary_if reference is only needed for the source address */
	memcpy(roam_adv_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	hardif_free_ref(primary_if);
	memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN);
	memcpy(roam_adv_packet->client, client, ETH_ALEN);

	neigh_node = orig_node_get_router(orig_node);
	if (!neigh_node)
		goto out;

	bat_dbg(DBG_TT, bat_priv,
		"Sending ROAMING_ADV to %pM (client %pM) via %pM\n",
		orig_node->orig, client, neigh_node->addr);

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = 0;

out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (ret)
		kfree_skb(skb);
	return;
}
1640
/* Periodic worker: expire stale local entries, roamed-away global entries,
 * timed-out pending requests and old roaming events, then re-arm itself
 * via tt_start_timer(). */
static void tt_purge(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct bat_priv *bat_priv =
		container_of(delayed_work, struct bat_priv, tt_work);

	tt_local_purge(bat_priv);
	tt_global_roam_purge(bat_priv);
	tt_req_purge(bat_priv);
	tt_roam_purge(bat_priv);

	tt_start_timer(bat_priv);
}
1655
/* Tear down all translation-table state.  The periodic worker is cancelled
 * first (synchronously) so nothing re-arms or touches the tables while
 * they are being released. */
void tt_free(struct bat_priv *bat_priv)
{
	cancel_delayed_work_sync(&bat_priv->tt_work);

	tt_local_table_free(bat_priv);
	tt_global_table_free(bat_priv);
	tt_req_list_free(bat_priv);
	tt_changes_list_free(bat_priv);
	tt_roam_list_free(bat_priv);

	kfree(bat_priv->tt_buff);
}
1668
/* This function will reset the specified flags from all the entries in
 * the given hash table and will increment num_local_tt for each involved
 * entry */
static void tt_local_reset_flags(struct bat_priv *bat_priv, uint16_t flags)
{
	int i;
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_local_entry *tt_local_entry;

	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_local_entry, node,
					 head, hash_entry) {
			/* only entries that actually carried one of the
			 * flags are stripped and counted */
			if (!(tt_local_entry->flags & flags))
				continue;
			tt_local_entry->flags &= ~flags;
			atomic_inc(&bat_priv->num_local_tt);
		}
		rcu_read_unlock();
	}

}
1698
/* Purge out all the tt local entries marked with TT_CLIENT_PENDING */
static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_local_entry *tt_local_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	int i;

	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		/* per-bucket lock: removal is a write to the hash list */
		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
					  head, hash_entry) {
			if (!(tt_local_entry->flags & TT_CLIENT_PENDING))
				continue;

			bat_dbg(DBG_TT, bat_priv, "Deleting local tt entry "
				"(%pM): pending\n", tt_local_entry->addr);

			atomic_dec(&bat_priv->num_local_tt);
			hlist_del_rcu(node);
			tt_local_entry_free_ref(tt_local_entry);
		}
		spin_unlock_bh(list_lock);
	}

}
1733
/* Finalize the local changes accumulated during the last OGM interval:
 * newly added clients lose their NEW flag (becoming announceable) and
 * clients marked TT_CLIENT_PENDING are really removed, then the table
 * version number is bumped. */
void tt_commit_changes(struct bat_priv *bat_priv)
{
	tt_local_reset_flags(bat_priv, TT_CLIENT_NEW);
	tt_local_purge_pending_clients(bat_priv);

	/* Increment the TTVN only once per OGM interval */
	atomic_inc(&bat_priv->ttvn);
	bat_priv->tt_poss_change = false;
}
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index 46152c38cc9..d4122cba53b 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -22,22 +22,45 @@
22#ifndef _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ 22#ifndef _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
23#define _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ 23#define _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
24 24
25int tt_local_init(struct bat_priv *bat_priv); 25int tt_len(int changes_num);
26void tt_local_add(struct net_device *soft_iface, uint8_t *addr); 26int tt_changes_fill_buffer(struct bat_priv *bat_priv,
27 unsigned char *buff, int buff_len);
28int tt_init(struct bat_priv *bat_priv);
29void tt_local_add(struct net_device *soft_iface, const uint8_t *addr);
27void tt_local_remove(struct bat_priv *bat_priv, 30void tt_local_remove(struct bat_priv *bat_priv,
28 uint8_t *addr, char *message); 31 const uint8_t *addr, const char *message, bool roaming);
29int tt_local_fill_buffer(struct bat_priv *bat_priv,
30 unsigned char *buff, int buff_len);
31int tt_local_seq_print_text(struct seq_file *seq, void *offset); 32int tt_local_seq_print_text(struct seq_file *seq, void *offset);
32void tt_local_free(struct bat_priv *bat_priv); 33void tt_global_add_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
33int tt_global_init(struct bat_priv *bat_priv); 34 const unsigned char *tt_buff, int tt_buff_len);
34void tt_global_add_orig(struct bat_priv *bat_priv, 35int tt_global_add(struct bat_priv *bat_priv,
35 struct orig_node *orig_node, 36 struct orig_node *orig_node, const unsigned char *addr,
36 unsigned char *tt_buff, int tt_buff_len); 37 uint8_t ttvn, bool roaming);
37int tt_global_seq_print_text(struct seq_file *seq, void *offset); 38int tt_global_seq_print_text(struct seq_file *seq, void *offset);
38void tt_global_del_orig(struct bat_priv *bat_priv, 39void tt_global_del_orig(struct bat_priv *bat_priv,
39 struct orig_node *orig_node, char *message); 40 struct orig_node *orig_node, const char *message);
40void tt_global_free(struct bat_priv *bat_priv); 41void tt_global_del(struct bat_priv *bat_priv,
41struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr); 42 struct orig_node *orig_node, const unsigned char *addr,
43 const char *message, bool roaming);
44struct orig_node *transtable_search(struct bat_priv *bat_priv,
45 const uint8_t *addr);
46void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node,
47 const unsigned char *tt_buff, uint8_t tt_num_changes);
48uint16_t tt_local_crc(struct bat_priv *bat_priv);
49uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node);
50void tt_free(struct bat_priv *bat_priv);
51int send_tt_request(struct bat_priv *bat_priv,
52 struct orig_node *dst_orig_node, uint8_t hvn,
53 uint16_t tt_crc, bool full_table);
54bool send_tt_response(struct bat_priv *bat_priv,
55 struct tt_query_packet *tt_request);
56void tt_update_changes(struct bat_priv *bat_priv, struct orig_node *orig_node,
57 uint16_t tt_num_changes, uint8_t ttvn,
58 struct tt_change *tt_change);
59bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr);
60void handle_tt_response(struct bat_priv *bat_priv,
61 struct tt_query_packet *tt_response);
62void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
63 struct orig_node *orig_node);
64void tt_commit_changes(struct bat_priv *bat_priv);
42 65
43#endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */ 66#endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index fab70e8b16e..51a0db7f644 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -75,8 +75,18 @@ struct orig_node {
75 unsigned long batman_seqno_reset; 75 unsigned long batman_seqno_reset;
76 uint8_t gw_flags; 76 uint8_t gw_flags;
77 uint8_t flags; 77 uint8_t flags;
78 atomic_t last_ttvn; /* last seen translation table version number */
79 uint16_t tt_crc;
78 unsigned char *tt_buff; 80 unsigned char *tt_buff;
79 int16_t tt_buff_len; 81 int16_t tt_buff_len;
82 spinlock_t tt_buff_lock; /* protects tt_buff */
83 atomic_t tt_size;
84 /* The tt_poss_change flag is used to detect an ongoing roaming phase.
85 * If true, then I sent a Roaming_adv to this orig_node and I have to
86 * inspect every packet directed to it to check whether it is still
87 * the true destination or not. This flag will be reset to false as
88 * soon as I receive a new TTVN from this orig_node */
89 bool tt_poss_change;
80 uint32_t last_real_seqno; 90 uint32_t last_real_seqno;
81 uint8_t last_ttl; 91 uint8_t last_ttl;
82 unsigned long bcast_bits[NUM_WORDS]; 92 unsigned long bcast_bits[NUM_WORDS];
@@ -94,6 +104,7 @@ struct orig_node {
94 spinlock_t ogm_cnt_lock; 104 spinlock_t ogm_cnt_lock;
95 /* bcast_seqno_lock protects bcast_bits, last_bcast_seqno */ 105 /* bcast_seqno_lock protects bcast_bits, last_bcast_seqno */
96 spinlock_t bcast_seqno_lock; 106 spinlock_t bcast_seqno_lock;
107 spinlock_t tt_list_lock; /* protects tt_list */
97 atomic_t bond_candidates; 108 atomic_t bond_candidates;
98 struct list_head bond_list; 109 struct list_head bond_list;
99}; 110};
@@ -145,6 +156,15 @@ struct bat_priv {
145 atomic_t bcast_seqno; 156 atomic_t bcast_seqno;
146 atomic_t bcast_queue_left; 157 atomic_t bcast_queue_left;
147 atomic_t batman_queue_left; 158 atomic_t batman_queue_left;
159 atomic_t ttvn; /* tranlation table version number */
160 atomic_t tt_ogm_append_cnt;
161 atomic_t tt_local_changes; /* changes registered in a OGM interval */
162 /* The tt_poss_change flag is used to detect an ongoing roaming phase.
163 * If true, then I received a Roaming_adv and I have to inspect every
164 * packet directed to me to check whether I am still the true
165 * destination or not. This flag will be reset to false as soon as I
166 * increase my TTVN */
167 bool tt_poss_change;
148 char num_ifaces; 168 char num_ifaces;
149 struct debug_log *debug_log; 169 struct debug_log *debug_log;
150 struct kobject *mesh_obj; 170 struct kobject *mesh_obj;
@@ -153,26 +173,35 @@ struct bat_priv {
153 struct hlist_head forw_bcast_list; 173 struct hlist_head forw_bcast_list;
154 struct hlist_head gw_list; 174 struct hlist_head gw_list;
155 struct hlist_head softif_neigh_vids; 175 struct hlist_head softif_neigh_vids;
176 struct list_head tt_changes_list; /* tracks changes in a OGM int */
156 struct list_head vis_send_list; 177 struct list_head vis_send_list;
157 struct hashtable_t *orig_hash; 178 struct hashtable_t *orig_hash;
158 struct hashtable_t *tt_local_hash; 179 struct hashtable_t *tt_local_hash;
159 struct hashtable_t *tt_global_hash; 180 struct hashtable_t *tt_global_hash;
181 struct list_head tt_req_list; /* list of pending tt_requests */
182 struct list_head tt_roam_list;
160 struct hashtable_t *vis_hash; 183 struct hashtable_t *vis_hash;
161 spinlock_t forw_bat_list_lock; /* protects forw_bat_list */ 184 spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
162 spinlock_t forw_bcast_list_lock; /* protects */ 185 spinlock_t forw_bcast_list_lock; /* protects */
163 spinlock_t tt_lhash_lock; /* protects tt_local_hash */ 186 spinlock_t tt_changes_list_lock; /* protects tt_changes */
164 spinlock_t tt_ghash_lock; /* protects tt_global_hash */ 187 spinlock_t tt_req_list_lock; /* protects tt_req_list */
188 spinlock_t tt_roam_list_lock; /* protects tt_roam_list */
165 spinlock_t gw_list_lock; /* protects gw_list and curr_gw */ 189 spinlock_t gw_list_lock; /* protects gw_list and curr_gw */
166 spinlock_t vis_hash_lock; /* protects vis_hash */ 190 spinlock_t vis_hash_lock; /* protects vis_hash */
167 spinlock_t vis_list_lock; /* protects vis_info::recv_list */ 191 spinlock_t vis_list_lock; /* protects vis_info::recv_list */
168 spinlock_t softif_neigh_lock; /* protects soft-interface neigh list */ 192 spinlock_t softif_neigh_lock; /* protects soft-interface neigh list */
169 spinlock_t softif_neigh_vid_lock; /* protects soft-interface vid list */ 193 spinlock_t softif_neigh_vid_lock; /* protects soft-interface vid list */
170 int16_t num_local_tt; 194 atomic_t num_local_tt;
171 atomic_t tt_local_changed; 195 /* Checksum of the local table, recomputed before sending a new OGM */
196 atomic_t tt_crc;
197 unsigned char *tt_buff;
198 int16_t tt_buff_len;
199 spinlock_t tt_buff_lock; /* protects tt_buff */
172 struct delayed_work tt_work; 200 struct delayed_work tt_work;
173 struct delayed_work orig_work; 201 struct delayed_work orig_work;
174 struct delayed_work vis_work; 202 struct delayed_work vis_work;
175 struct gw_node __rcu *curr_gw; /* rcu protected pointer */ 203 struct gw_node __rcu *curr_gw; /* rcu protected pointer */
204 atomic_t gw_reselect;
176 struct hard_iface __rcu *primary_if; /* rcu protected pointer */ 205 struct hard_iface __rcu *primary_if; /* rcu protected pointer */
177 struct vis_info *my_vis_info; 206 struct vis_info *my_vis_info;
178}; 207};
@@ -194,15 +223,40 @@ struct socket_packet {
194 223
195struct tt_local_entry { 224struct tt_local_entry {
196 uint8_t addr[ETH_ALEN]; 225 uint8_t addr[ETH_ALEN];
197 unsigned long last_seen;
198 char never_purge;
199 struct hlist_node hash_entry; 226 struct hlist_node hash_entry;
227 unsigned long last_seen;
228 uint16_t flags;
229 atomic_t refcount;
230 struct rcu_head rcu;
200}; 231};
201 232
202struct tt_global_entry { 233struct tt_global_entry {
203 uint8_t addr[ETH_ALEN]; 234 uint8_t addr[ETH_ALEN];
235 struct hlist_node hash_entry; /* entry in the global table */
204 struct orig_node *orig_node; 236 struct orig_node *orig_node;
205 struct hlist_node hash_entry; 237 uint8_t ttvn;
238 uint16_t flags; /* only TT_GLOBAL_ROAM is used */
239 unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */
240 atomic_t refcount;
241 struct rcu_head rcu;
242};
243
244struct tt_change_node {
245 struct list_head list;
246 struct tt_change change;
247};
248
249struct tt_req_node {
250 uint8_t addr[ETH_ALEN];
251 unsigned long issued_at;
252 struct list_head list;
253};
254
255struct tt_roam_node {
256 uint8_t addr[ETH_ALEN];
257 atomic_t counter;
258 unsigned long first_time;
259 struct list_head list;
206}; 260};
207 261
208/** 262/**
@@ -246,10 +300,10 @@ struct frag_packet_list_entry {
246}; 300};
247 301
248struct vis_info { 302struct vis_info {
249 unsigned long first_seen; 303 unsigned long first_seen;
250 struct list_head recv_list; 304 /* list of server-neighbors we received a vis-packet
251 /* list of server-neighbors we received a vis-packet 305 * from. we should not reply to them. */
252 * from. we should not reply to them. */ 306 struct list_head recv_list;
253 struct list_head send_list; 307 struct list_head send_list;
254 struct kref refcount; 308 struct kref refcount;
255 struct hlist_node hash_entry; 309 struct hlist_node hash_entry;
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index 19c3daf34ac..32b125fb3d3 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -39,8 +39,8 @@ static struct sk_buff *frag_merge_packet(struct list_head *head,
39 (struct unicast_frag_packet *)skb->data; 39 (struct unicast_frag_packet *)skb->data;
40 struct sk_buff *tmp_skb; 40 struct sk_buff *tmp_skb;
41 struct unicast_packet *unicast_packet; 41 struct unicast_packet *unicast_packet;
42 int hdr_len = sizeof(struct unicast_packet); 42 int hdr_len = sizeof(*unicast_packet);
43 int uni_diff = sizeof(struct unicast_frag_packet) - hdr_len; 43 int uni_diff = sizeof(*up) - hdr_len;
44 44
45 /* set skb to the first part and tmp_skb to the second part */ 45 /* set skb to the first part and tmp_skb to the second part */
46 if (up->flags & UNI_FRAG_HEAD) { 46 if (up->flags & UNI_FRAG_HEAD) {
@@ -53,7 +53,7 @@ static struct sk_buff *frag_merge_packet(struct list_head *head,
53 if (skb_linearize(skb) < 0 || skb_linearize(tmp_skb) < 0) 53 if (skb_linearize(skb) < 0 || skb_linearize(tmp_skb) < 0)
54 goto err; 54 goto err;
55 55
56 skb_pull(tmp_skb, sizeof(struct unicast_frag_packet)); 56 skb_pull(tmp_skb, sizeof(*up));
57 if (pskb_expand_head(skb, 0, tmp_skb->len, GFP_ATOMIC) < 0) 57 if (pskb_expand_head(skb, 0, tmp_skb->len, GFP_ATOMIC) < 0)
58 goto err; 58 goto err;
59 59
@@ -99,8 +99,7 @@ static int frag_create_buffer(struct list_head *head)
99 struct frag_packet_list_entry *tfp; 99 struct frag_packet_list_entry *tfp;
100 100
101 for (i = 0; i < FRAG_BUFFER_SIZE; i++) { 101 for (i = 0; i < FRAG_BUFFER_SIZE; i++) {
102 tfp = kmalloc(sizeof(struct frag_packet_list_entry), 102 tfp = kmalloc(sizeof(*tfp), GFP_ATOMIC);
103 GFP_ATOMIC);
104 if (!tfp) { 103 if (!tfp) {
105 frag_list_free(head); 104 frag_list_free(head);
106 return -ENOMEM; 105 return -ENOMEM;
@@ -115,7 +114,7 @@ static int frag_create_buffer(struct list_head *head)
115} 114}
116 115
117static struct frag_packet_list_entry *frag_search_packet(struct list_head *head, 116static struct frag_packet_list_entry *frag_search_packet(struct list_head *head,
118 struct unicast_frag_packet *up) 117 const struct unicast_frag_packet *up)
119{ 118{
120 struct frag_packet_list_entry *tfp; 119 struct frag_packet_list_entry *tfp;
121 struct unicast_frag_packet *tmp_up = NULL; 120 struct unicast_frag_packet *tmp_up = NULL;
@@ -218,14 +217,14 @@ out:
218} 217}
219 218
220int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv, 219int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
221 struct hard_iface *hard_iface, uint8_t dstaddr[]) 220 struct hard_iface *hard_iface, const uint8_t dstaddr[])
222{ 221{
223 struct unicast_packet tmp_uc, *unicast_packet; 222 struct unicast_packet tmp_uc, *unicast_packet;
224 struct hard_iface *primary_if; 223 struct hard_iface *primary_if;
225 struct sk_buff *frag_skb; 224 struct sk_buff *frag_skb;
226 struct unicast_frag_packet *frag1, *frag2; 225 struct unicast_frag_packet *frag1, *frag2;
227 int uc_hdr_len = sizeof(struct unicast_packet); 226 int uc_hdr_len = sizeof(*unicast_packet);
228 int ucf_hdr_len = sizeof(struct unicast_frag_packet); 227 int ucf_hdr_len = sizeof(*frag1);
229 int data_len = skb->len - uc_hdr_len; 228 int data_len = skb->len - uc_hdr_len;
230 int large_tail = 0, ret = NET_RX_DROP; 229 int large_tail = 0, ret = NET_RX_DROP;
231 uint16_t seqno; 230 uint16_t seqno;
@@ -250,14 +249,14 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
250 frag1 = (struct unicast_frag_packet *)skb->data; 249 frag1 = (struct unicast_frag_packet *)skb->data;
251 frag2 = (struct unicast_frag_packet *)frag_skb->data; 250 frag2 = (struct unicast_frag_packet *)frag_skb->data;
252 251
253 memcpy(frag1, &tmp_uc, sizeof(struct unicast_packet)); 252 memcpy(frag1, &tmp_uc, sizeof(tmp_uc));
254 253
255 frag1->ttl--; 254 frag1->ttl--;
256 frag1->version = COMPAT_VERSION; 255 frag1->version = COMPAT_VERSION;
257 frag1->packet_type = BAT_UNICAST_FRAG; 256 frag1->packet_type = BAT_UNICAST_FRAG;
258 257
259 memcpy(frag1->orig, primary_if->net_dev->dev_addr, ETH_ALEN); 258 memcpy(frag1->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
260 memcpy(frag2, frag1, sizeof(struct unicast_frag_packet)); 259 memcpy(frag2, frag1, sizeof(*frag2));
261 260
262 if (data_len & 1) 261 if (data_len & 1)
263 large_tail = UNI_FRAG_LARGETAIL; 262 large_tail = UNI_FRAG_LARGETAIL;
@@ -295,7 +294,7 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
295 294
296 /* get routing information */ 295 /* get routing information */
297 if (is_multicast_ether_addr(ethhdr->h_dest)) { 296 if (is_multicast_ether_addr(ethhdr->h_dest)) {
298 orig_node = (struct orig_node *)gw_get_selected_orig(bat_priv); 297 orig_node = gw_get_selected_orig(bat_priv);
299 if (orig_node) 298 if (orig_node)
300 goto find_router; 299 goto find_router;
301 } 300 }
@@ -314,10 +313,7 @@ find_router:
314 if (!neigh_node) 313 if (!neigh_node)
315 goto out; 314 goto out;
316 315
317 if (neigh_node->if_incoming->if_status != IF_ACTIVE) 316 if (my_skb_head_push(skb, sizeof(*unicast_packet)) < 0)
318 goto out;
319
320 if (my_skb_head_push(skb, sizeof(struct unicast_packet)) < 0)
321 goto out; 317 goto out;
322 318
323 unicast_packet = (struct unicast_packet *)skb->data; 319 unicast_packet = (struct unicast_packet *)skb->data;
@@ -329,9 +325,12 @@ find_router:
329 unicast_packet->ttl = TTL; 325 unicast_packet->ttl = TTL;
330 /* copy the destination for faster routing */ 326 /* copy the destination for faster routing */
331 memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN); 327 memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
328 /* set the destination tt version number */
329 unicast_packet->ttvn =
330 (uint8_t)atomic_read(&orig_node->last_ttvn);
332 331
333 if (atomic_read(&bat_priv->fragmentation) && 332 if (atomic_read(&bat_priv->fragmentation) &&
334 data_len + sizeof(struct unicast_packet) > 333 data_len + sizeof(*unicast_packet) >
335 neigh_node->if_incoming->net_dev->mtu) { 334 neigh_node->if_incoming->net_dev->mtu) {
336 /* send frag skb decreases ttl */ 335 /* send frag skb decreases ttl */
337 unicast_packet->ttl++; 336 unicast_packet->ttl++;
diff --git a/net/batman-adv/unicast.h b/net/batman-adv/unicast.h
index 16ad7a9242b..62f54b95462 100644
--- a/net/batman-adv/unicast.h
+++ b/net/batman-adv/unicast.h
@@ -32,11 +32,11 @@ int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
32void frag_list_free(struct list_head *head); 32void frag_list_free(struct list_head *head);
33int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv); 33int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv);
34int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv, 34int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
35 struct hard_iface *hard_iface, uint8_t dstaddr[]); 35 struct hard_iface *hard_iface, const uint8_t dstaddr[]);
36 36
37static inline int frag_can_reassemble(struct sk_buff *skb, int mtu) 37static inline int frag_can_reassemble(const struct sk_buff *skb, int mtu)
38{ 38{
39 struct unicast_frag_packet *unicast_packet; 39 const struct unicast_frag_packet *unicast_packet;
40 int uneven_correction = 0; 40 int uneven_correction = 0;
41 unsigned int merged_size; 41 unsigned int merged_size;
42 42
@@ -49,7 +49,7 @@ static inline int frag_can_reassemble(struct sk_buff *skb, int mtu)
49 uneven_correction = -1; 49 uneven_correction = -1;
50 } 50 }
51 51
52 merged_size = (skb->len - sizeof(struct unicast_frag_packet)) * 2; 52 merged_size = (skb->len - sizeof(*unicast_packet)) * 2;
53 merged_size += sizeof(struct unicast_packet) + uneven_correction; 53 merged_size += sizeof(struct unicast_packet) + uneven_correction;
54 54
55 return merged_size <= mtu; 55 return merged_size <= mtu;
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index c39f20cc1ba..8a1b98589d7 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -30,22 +30,6 @@
30 30
31#define MAX_VIS_PACKET_SIZE 1000 31#define MAX_VIS_PACKET_SIZE 1000
32 32
33/* Returns the smallest signed integer in two's complement with the sizeof x */
34#define smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))
35
36/* Checks if a sequence number x is a predecessor/successor of y.
37 * they handle overflows/underflows and can correctly check for a
38 * predecessor/successor unless the variable sequence number has grown by
39 * more then 2**(bitwidth(x)-1)-1.
40 * This means that for a uint8_t with the maximum value 255, it would think:
41 * - when adding nothing - it is neither a predecessor nor a successor
42 * - before adding more than 127 to the starting value - it is a predecessor,
43 * - when adding 128 - it is neither a predecessor nor a successor,
44 * - after adding more than 127 to the starting value - it is a successor */
45#define seq_before(x, y) ({typeof(x) _dummy = (x - y); \
46 _dummy > smallest_signed_int(_dummy); })
47#define seq_after(x, y) seq_before(y, x)
48
49static void start_vis_timer(struct bat_priv *bat_priv); 33static void start_vis_timer(struct bat_priv *bat_priv);
50 34
51/* free the info */ 35/* free the info */
@@ -68,10 +52,10 @@ static void free_info(struct kref *ref)
68} 52}
69 53
70/* Compare two vis packets, used by the hashing algorithm */ 54/* Compare two vis packets, used by the hashing algorithm */
71static int vis_info_cmp(struct hlist_node *node, void *data2) 55static int vis_info_cmp(const struct hlist_node *node, const void *data2)
72{ 56{
73 struct vis_info *d1, *d2; 57 const struct vis_info *d1, *d2;
74 struct vis_packet *p1, *p2; 58 const struct vis_packet *p1, *p2;
75 59
76 d1 = container_of(node, struct vis_info, hash_entry); 60 d1 = container_of(node, struct vis_info, hash_entry);
77 d2 = data2; 61 d2 = data2;
@@ -82,11 +66,11 @@ static int vis_info_cmp(struct hlist_node *node, void *data2)
82 66
83/* hash function to choose an entry in a hash table of given size */ 67/* hash function to choose an entry in a hash table of given size */
84/* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */ 68/* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */
85static int vis_info_choose(void *data, int size) 69static int vis_info_choose(const void *data, int size)
86{ 70{
87 struct vis_info *vis_info = data; 71 const struct vis_info *vis_info = data;
88 struct vis_packet *packet; 72 const struct vis_packet *packet;
89 unsigned char *key; 73 const unsigned char *key;
90 uint32_t hash = 0; 74 uint32_t hash = 0;
91 size_t i; 75 size_t i;
92 76
@@ -106,7 +90,7 @@ static int vis_info_choose(void *data, int size)
106} 90}
107 91
108static struct vis_info *vis_hash_find(struct bat_priv *bat_priv, 92static struct vis_info *vis_hash_find(struct bat_priv *bat_priv,
109 void *data) 93 const void *data)
110{ 94{
111 struct hashtable_t *hash = bat_priv->vis_hash; 95 struct hashtable_t *hash = bat_priv->vis_hash;
112 struct hlist_head *head; 96 struct hlist_head *head;
@@ -143,7 +127,7 @@ static void vis_data_insert_interface(const uint8_t *interface,
143 struct hlist_node *pos; 127 struct hlist_node *pos;
144 128
145 hlist_for_each_entry(entry, pos, if_list, list) { 129 hlist_for_each_entry(entry, pos, if_list, list) {
146 if (compare_eth(entry->addr, (void *)interface)) 130 if (compare_eth(entry->addr, interface))
147 return; 131 return;
148 } 132 }
149 133
@@ -156,7 +140,8 @@ static void vis_data_insert_interface(const uint8_t *interface,
156 hlist_add_head(&entry->list, if_list); 140 hlist_add_head(&entry->list, if_list);
157} 141}
158 142
159static ssize_t vis_data_read_prim_sec(char *buff, struct hlist_head *if_list) 143static ssize_t vis_data_read_prim_sec(char *buff,
144 const struct hlist_head *if_list)
160{ 145{
161 struct if_list_entry *entry; 146 struct if_list_entry *entry;
162 struct hlist_node *pos; 147 struct hlist_node *pos;
@@ -189,8 +174,9 @@ static size_t vis_data_count_prim_sec(struct hlist_head *if_list)
189} 174}
190 175
191/* read an entry */ 176/* read an entry */
192static ssize_t vis_data_read_entry(char *buff, struct vis_info_entry *entry, 177static ssize_t vis_data_read_entry(char *buff,
193 uint8_t *src, bool primary) 178 const struct vis_info_entry *entry,
179 const uint8_t *src, bool primary)
194{ 180{
195 /* maximal length: max(4+17+2, 3+17+1+3+2) == 26 */ 181 /* maximal length: max(4+17+2, 3+17+1+3+2) == 26 */
196 if (primary && entry->quality == 0) 182 if (primary && entry->quality == 0)
@@ -239,7 +225,7 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
239 hlist_for_each_entry_rcu(info, node, head, hash_entry) { 225 hlist_for_each_entry_rcu(info, node, head, hash_entry) {
240 packet = (struct vis_packet *)info->skb_packet->data; 226 packet = (struct vis_packet *)info->skb_packet->data;
241 entries = (struct vis_info_entry *) 227 entries = (struct vis_info_entry *)
242 ((char *)packet + sizeof(struct vis_packet)); 228 ((char *)packet + sizeof(*packet));
243 229
244 for (j = 0; j < packet->entries; j++) { 230 for (j = 0; j < packet->entries; j++) {
245 if (entries[j].quality == 0) 231 if (entries[j].quality == 0)
@@ -287,7 +273,7 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
287 hlist_for_each_entry_rcu(info, node, head, hash_entry) { 273 hlist_for_each_entry_rcu(info, node, head, hash_entry) {
288 packet = (struct vis_packet *)info->skb_packet->data; 274 packet = (struct vis_packet *)info->skb_packet->data;
289 entries = (struct vis_info_entry *) 275 entries = (struct vis_info_entry *)
290 ((char *)packet + sizeof(struct vis_packet)); 276 ((char *)packet + sizeof(*packet));
291 277
292 for (j = 0; j < packet->entries; j++) { 278 for (j = 0; j < packet->entries; j++) {
293 if (entries[j].quality == 0) 279 if (entries[j].quality == 0)
@@ -361,11 +347,11 @@ static void send_list_del(struct vis_info *info)
361 347
362/* tries to add one entry to the receive list. */ 348/* tries to add one entry to the receive list. */
363static void recv_list_add(struct bat_priv *bat_priv, 349static void recv_list_add(struct bat_priv *bat_priv,
364 struct list_head *recv_list, char *mac) 350 struct list_head *recv_list, const char *mac)
365{ 351{
366 struct recvlist_node *entry; 352 struct recvlist_node *entry;
367 353
368 entry = kmalloc(sizeof(struct recvlist_node), GFP_ATOMIC); 354 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
369 if (!entry) 355 if (!entry)
370 return; 356 return;
371 357
@@ -377,9 +363,9 @@ static void recv_list_add(struct bat_priv *bat_priv,
377 363
378/* returns 1 if this mac is in the recv_list */ 364/* returns 1 if this mac is in the recv_list */
379static int recv_list_is_in(struct bat_priv *bat_priv, 365static int recv_list_is_in(struct bat_priv *bat_priv,
380 struct list_head *recv_list, char *mac) 366 const struct list_head *recv_list, const char *mac)
381{ 367{
382 struct recvlist_node *entry; 368 const struct recvlist_node *entry;
383 369
384 spin_lock_bh(&bat_priv->vis_list_lock); 370 spin_lock_bh(&bat_priv->vis_list_lock);
385 list_for_each_entry(entry, recv_list, list) { 371 list_for_each_entry(entry, recv_list, list) {
@@ -412,11 +398,11 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv,
412 return NULL; 398 return NULL;
413 399
414 /* see if the packet is already in vis_hash */ 400 /* see if the packet is already in vis_hash */
415 search_elem.skb_packet = dev_alloc_skb(sizeof(struct vis_packet)); 401 search_elem.skb_packet = dev_alloc_skb(sizeof(*search_packet));
416 if (!search_elem.skb_packet) 402 if (!search_elem.skb_packet)
417 return NULL; 403 return NULL;
418 search_packet = (struct vis_packet *)skb_put(search_elem.skb_packet, 404 search_packet = (struct vis_packet *)skb_put(search_elem.skb_packet,
419 sizeof(struct vis_packet)); 405 sizeof(*search_packet));
420 406
421 memcpy(search_packet->vis_orig, vis_packet->vis_orig, ETH_ALEN); 407 memcpy(search_packet->vis_orig, vis_packet->vis_orig, ETH_ALEN);
422 old_info = vis_hash_find(bat_priv, &search_elem); 408 old_info = vis_hash_find(bat_priv, &search_elem);
@@ -442,27 +428,26 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv,
442 kref_put(&old_info->refcount, free_info); 428 kref_put(&old_info->refcount, free_info);
443 } 429 }
444 430
445 info = kmalloc(sizeof(struct vis_info), GFP_ATOMIC); 431 info = kmalloc(sizeof(*info), GFP_ATOMIC);
446 if (!info) 432 if (!info)
447 return NULL; 433 return NULL;
448 434
449 info->skb_packet = dev_alloc_skb(sizeof(struct vis_packet) + 435 info->skb_packet = dev_alloc_skb(sizeof(*packet) + vis_info_len +
450 vis_info_len + sizeof(struct ethhdr)); 436 sizeof(struct ethhdr));
451 if (!info->skb_packet) { 437 if (!info->skb_packet) {
452 kfree(info); 438 kfree(info);
453 return NULL; 439 return NULL;
454 } 440 }
455 skb_reserve(info->skb_packet, sizeof(struct ethhdr)); 441 skb_reserve(info->skb_packet, sizeof(struct ethhdr));
456 packet = (struct vis_packet *)skb_put(info->skb_packet, 442 packet = (struct vis_packet *)skb_put(info->skb_packet, sizeof(*packet)
457 sizeof(struct vis_packet) + 443 + vis_info_len);
458 vis_info_len);
459 444
460 kref_init(&info->refcount); 445 kref_init(&info->refcount);
461 INIT_LIST_HEAD(&info->send_list); 446 INIT_LIST_HEAD(&info->send_list);
462 INIT_LIST_HEAD(&info->recv_list); 447 INIT_LIST_HEAD(&info->recv_list);
463 info->first_seen = jiffies; 448 info->first_seen = jiffies;
464 info->bat_priv = bat_priv; 449 info->bat_priv = bat_priv;
465 memcpy(packet, vis_packet, sizeof(struct vis_packet) + vis_info_len); 450 memcpy(packet, vis_packet, sizeof(*packet) + vis_info_len);
466 451
467 /* initialize and add new packet. */ 452 /* initialize and add new packet. */
468 *is_new = 1; 453 *is_new = 1;
@@ -599,9 +584,9 @@ static int find_best_vis_server(struct bat_priv *bat_priv,
599} 584}
600 585
601/* Return true if the vis packet is full. */ 586/* Return true if the vis packet is full. */
602static bool vis_packet_full(struct vis_info *info) 587static bool vis_packet_full(const struct vis_info *info)
603{ 588{
604 struct vis_packet *packet; 589 const struct vis_packet *packet;
605 packet = (struct vis_packet *)info->skb_packet->data; 590 packet = (struct vis_packet *)info->skb_packet->data;
606 591
607 if (MAX_VIS_PACKET_SIZE / sizeof(struct vis_info_entry) 592 if (MAX_VIS_PACKET_SIZE / sizeof(struct vis_info_entry)
@@ -619,7 +604,7 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
619 struct hlist_head *head; 604 struct hlist_head *head;
620 struct orig_node *orig_node; 605 struct orig_node *orig_node;
621 struct neigh_node *router; 606 struct neigh_node *router;
622 struct vis_info *info = (struct vis_info *)bat_priv->my_vis_info; 607 struct vis_info *info = bat_priv->my_vis_info;
623 struct vis_packet *packet = (struct vis_packet *)info->skb_packet->data; 608 struct vis_packet *packet = (struct vis_packet *)info->skb_packet->data;
624 struct vis_info_entry *entry; 609 struct vis_info_entry *entry;
625 struct tt_local_entry *tt_local_entry; 610 struct tt_local_entry *tt_local_entry;
@@ -632,7 +617,7 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
632 packet->ttl = TTL; 617 packet->ttl = TTL;
633 packet->seqno = htonl(ntohl(packet->seqno) + 1); 618 packet->seqno = htonl(ntohl(packet->seqno) + 1);
634 packet->entries = 0; 619 packet->entries = 0;
635 skb_trim(info->skb_packet, sizeof(struct vis_packet)); 620 skb_trim(info->skb_packet, sizeof(*packet));
636 621
637 if (packet->vis_type == VIS_TYPE_CLIENT_UPDATE) { 622 if (packet->vis_type == VIS_TYPE_CLIENT_UPDATE) {
638 best_tq = find_best_vis_server(bat_priv, info); 623 best_tq = find_best_vis_server(bat_priv, info);
@@ -680,11 +665,12 @@ next:
680 665
681 hash = bat_priv->tt_local_hash; 666 hash = bat_priv->tt_local_hash;
682 667
683 spin_lock_bh(&bat_priv->tt_lhash_lock);
684 for (i = 0; i < hash->size; i++) { 668 for (i = 0; i < hash->size; i++) {
685 head = &hash->table[i]; 669 head = &hash->table[i];
686 670
687 hlist_for_each_entry(tt_local_entry, node, head, hash_entry) { 671 rcu_read_lock();
672 hlist_for_each_entry_rcu(tt_local_entry, node, head,
673 hash_entry) {
688 entry = (struct vis_info_entry *) 674 entry = (struct vis_info_entry *)
689 skb_put(info->skb_packet, 675 skb_put(info->skb_packet,
690 sizeof(*entry)); 676 sizeof(*entry));
@@ -693,14 +679,12 @@ next:
693 entry->quality = 0; /* 0 means TT */ 679 entry->quality = 0; /* 0 means TT */
694 packet->entries++; 680 packet->entries++;
695 681
696 if (vis_packet_full(info)) { 682 if (vis_packet_full(info))
697 spin_unlock_bh(&bat_priv->tt_lhash_lock); 683 goto unlock;
698 return 0;
699 }
700 } 684 }
685 rcu_read_unlock();
701 } 686 }
702 687
703 spin_unlock_bh(&bat_priv->tt_lhash_lock);
704 return 0; 688 return 0;
705 689
706unlock: 690unlock:
@@ -908,17 +892,15 @@ int vis_init(struct bat_priv *bat_priv)
908 goto err; 892 goto err;
909 } 893 }
910 894
911 bat_priv->my_vis_info->skb_packet = dev_alloc_skb( 895 bat_priv->my_vis_info->skb_packet = dev_alloc_skb(sizeof(*packet) +
912 sizeof(struct vis_packet) + 896 MAX_VIS_PACKET_SIZE +
913 MAX_VIS_PACKET_SIZE + 897 sizeof(struct ethhdr));
914 sizeof(struct ethhdr));
915 if (!bat_priv->my_vis_info->skb_packet) 898 if (!bat_priv->my_vis_info->skb_packet)
916 goto free_info; 899 goto free_info;
917 900
918 skb_reserve(bat_priv->my_vis_info->skb_packet, sizeof(struct ethhdr)); 901 skb_reserve(bat_priv->my_vis_info->skb_packet, sizeof(struct ethhdr));
919 packet = (struct vis_packet *)skb_put( 902 packet = (struct vis_packet *)skb_put(bat_priv->my_vis_info->skb_packet,
920 bat_priv->my_vis_info->skb_packet, 903 sizeof(*packet));
921 sizeof(struct vis_packet));
922 904
923 /* prefill the vis info */ 905 /* prefill the vis info */
924 bat_priv->my_vis_info->first_seen = jiffies - 906 bat_priv->my_vis_info->first_seen = jiffies -