diff options
author | David S. Miller <davem@davemloft.net> | 2011-06-20 15:59:37 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2011-06-20 15:59:37 -0400 |
commit | eac56465b88cc9ad3b964a9f0a02be3d3a136ddf (patch) | |
tree | 63550c9f343f6c9351f0b54be8664807e97abb93 /net/batman-adv | |
parent | 1b9c4134c126aa8ae00a57672d4a4eaecc436b54 (diff) | |
parent | 43676ab590c3f8686fd047d34c3e33803eef71f0 (diff) |
Merge branch 'batman-adv/next' of git://git.open-mesh.org/ecsv/linux-merge
Diffstat (limited to 'net/batman-adv')
27 files changed, 2080 insertions, 462 deletions
diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig index 6c051ad833eb..2b68d068eaf3 100644 --- a/net/batman-adv/Kconfig +++ b/net/batman-adv/Kconfig | |||
@@ -5,6 +5,7 @@ | |||
5 | config BATMAN_ADV | 5 | config BATMAN_ADV |
6 | tristate "B.A.T.M.A.N. Advanced Meshing Protocol" | 6 | tristate "B.A.T.M.A.N. Advanced Meshing Protocol" |
7 | depends on NET | 7 | depends on NET |
8 | select CRC16 | ||
8 | default n | 9 | default n |
9 | ---help--- | 10 | ---help--- |
10 | 11 | ||
diff --git a/net/batman-adv/aggregation.c b/net/batman-adv/aggregation.c index 4080970ade7d..c583e049f421 100644 --- a/net/batman-adv/aggregation.c +++ b/net/batman-adv/aggregation.c | |||
@@ -20,17 +20,12 @@ | |||
20 | */ | 20 | */ |
21 | 21 | ||
22 | #include "main.h" | 22 | #include "main.h" |
23 | #include "translation-table.h" | ||
23 | #include "aggregation.h" | 24 | #include "aggregation.h" |
24 | #include "send.h" | 25 | #include "send.h" |
25 | #include "routing.h" | 26 | #include "routing.h" |
26 | #include "hard-interface.h" | 27 | #include "hard-interface.h" |
27 | 28 | ||
28 | /* calculate the size of the tt information for a given packet */ | ||
29 | static int tt_len(const struct batman_packet *batman_packet) | ||
30 | { | ||
31 | return batman_packet->num_tt * ETH_ALEN; | ||
32 | } | ||
33 | |||
34 | /* return true if new_packet can be aggregated with forw_packet */ | 29 | /* return true if new_packet can be aggregated with forw_packet */ |
35 | static bool can_aggregate_with(const struct batman_packet *new_batman_packet, | 30 | static bool can_aggregate_with(const struct batman_packet *new_batman_packet, |
36 | int packet_len, | 31 | int packet_len, |
@@ -195,7 +190,7 @@ static void aggregate(struct forw_packet *forw_packet_aggr, | |||
195 | 190 | ||
196 | void add_bat_packet_to_list(struct bat_priv *bat_priv, | 191 | void add_bat_packet_to_list(struct bat_priv *bat_priv, |
197 | unsigned char *packet_buff, int packet_len, | 192 | unsigned char *packet_buff, int packet_len, |
198 | struct hard_iface *if_incoming, char own_packet, | 193 | struct hard_iface *if_incoming, int own_packet, |
199 | unsigned long send_time) | 194 | unsigned long send_time) |
200 | { | 195 | { |
201 | /** | 196 | /** |
@@ -264,18 +259,20 @@ void receive_aggr_bat_packet(const struct ethhdr *ethhdr, | |||
264 | batman_packet = (struct batman_packet *)packet_buff; | 259 | batman_packet = (struct batman_packet *)packet_buff; |
265 | 260 | ||
266 | do { | 261 | do { |
267 | /* network to host order for our 32bit seqno, and the | 262 | /* network to host order for our 32bit seqno and the |
268 | orig_interval. */ | 263 | orig_interval */ |
269 | batman_packet->seqno = ntohl(batman_packet->seqno); | 264 | batman_packet->seqno = ntohl(batman_packet->seqno); |
265 | batman_packet->tt_crc = ntohs(batman_packet->tt_crc); | ||
270 | 266 | ||
271 | tt_buff = packet_buff + buff_pos + BAT_PACKET_LEN; | 267 | tt_buff = packet_buff + buff_pos + BAT_PACKET_LEN; |
272 | receive_bat_packet(ethhdr, batman_packet, | ||
273 | tt_buff, tt_len(batman_packet), | ||
274 | if_incoming); | ||
275 | 268 | ||
276 | buff_pos += BAT_PACKET_LEN + tt_len(batman_packet); | 269 | receive_bat_packet(ethhdr, batman_packet, tt_buff, if_incoming); |
270 | |||
271 | buff_pos += BAT_PACKET_LEN + | ||
272 | tt_len(batman_packet->tt_num_changes); | ||
273 | |||
277 | batman_packet = (struct batman_packet *) | 274 | batman_packet = (struct batman_packet *) |
278 | (packet_buff + buff_pos); | 275 | (packet_buff + buff_pos); |
279 | } while (aggregated_packet(buff_pos, packet_len, | 276 | } while (aggregated_packet(buff_pos, packet_len, |
280 | batman_packet->num_tt)); | 277 | batman_packet->tt_num_changes)); |
281 | } | 278 | } |
diff --git a/net/batman-adv/aggregation.h b/net/batman-adv/aggregation.h index fedeb8d0e13f..216337bb841f 100644 --- a/net/batman-adv/aggregation.h +++ b/net/batman-adv/aggregation.h | |||
@@ -25,9 +25,11 @@ | |||
25 | #include "main.h" | 25 | #include "main.h" |
26 | 26 | ||
27 | /* is there another aggregated packet here? */ | 27 | /* is there another aggregated packet here? */ |
28 | static inline int aggregated_packet(int buff_pos, int packet_len, int num_tt) | 28 | static inline int aggregated_packet(int buff_pos, int packet_len, |
29 | int tt_num_changes) | ||
29 | { | 30 | { |
30 | int next_buff_pos = buff_pos + BAT_PACKET_LEN + (num_tt * ETH_ALEN); | 31 | int next_buff_pos = buff_pos + BAT_PACKET_LEN + (tt_num_changes * |
32 | sizeof(struct tt_change)); | ||
31 | 33 | ||
32 | return (next_buff_pos <= packet_len) && | 34 | return (next_buff_pos <= packet_len) && |
33 | (next_buff_pos <= MAX_AGGREGATION_BYTES); | 35 | (next_buff_pos <= MAX_AGGREGATION_BYTES); |
@@ -35,7 +37,7 @@ static inline int aggregated_packet(int buff_pos, int packet_len, int num_tt) | |||
35 | 37 | ||
36 | void add_bat_packet_to_list(struct bat_priv *bat_priv, | 38 | void add_bat_packet_to_list(struct bat_priv *bat_priv, |
37 | unsigned char *packet_buff, int packet_len, | 39 | unsigned char *packet_buff, int packet_len, |
38 | struct hard_iface *if_incoming, char own_packet, | 40 | struct hard_iface *if_incoming, int own_packet, |
39 | unsigned long send_time); | 41 | unsigned long send_time); |
40 | void receive_aggr_bat_packet(const struct ethhdr *ethhdr, | 42 | void receive_aggr_bat_packet(const struct ethhdr *ethhdr, |
41 | unsigned char *packet_buff, int packet_len, | 43 | unsigned char *packet_buff, int packet_len, |
diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c index 924d5773da21..cd15deba60a1 100644 --- a/net/batman-adv/bat_sysfs.c +++ b/net/batman-adv/bat_sysfs.c | |||
@@ -40,6 +40,20 @@ static struct bat_priv *kobj_to_batpriv(struct kobject *obj) | |||
40 | return netdev_priv(net_dev); | 40 | return netdev_priv(net_dev); |
41 | } | 41 | } |
42 | 42 | ||
43 | #define UEV_TYPE_VAR "BATTYPE=" | ||
44 | #define UEV_ACTION_VAR "BATACTION=" | ||
45 | #define UEV_DATA_VAR "BATDATA=" | ||
46 | |||
47 | static char *uev_action_str[] = { | ||
48 | "add", | ||
49 | "del", | ||
50 | "change" | ||
51 | }; | ||
52 | |||
53 | static char *uev_type_str[] = { | ||
54 | "gw" | ||
55 | }; | ||
56 | |||
43 | /* Use this, if you have customized show and store functions */ | 57 | /* Use this, if you have customized show and store functions */ |
44 | #define BAT_ATTR(_name, _mode, _show, _store) \ | 58 | #define BAT_ATTR(_name, _mode, _show, _store) \ |
45 | struct bat_attribute bat_attr_##_name = { \ | 59 | struct bat_attribute bat_attr_##_name = { \ |
@@ -375,7 +389,7 @@ BAT_ATTR_UINT(gw_sel_class, S_IRUGO | S_IWUSR, 1, TQ_MAX_VALUE, | |||
375 | static BAT_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, show_gw_bwidth, | 389 | static BAT_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, show_gw_bwidth, |
376 | store_gw_bwidth); | 390 | store_gw_bwidth); |
377 | #ifdef CONFIG_BATMAN_ADV_DEBUG | 391 | #ifdef CONFIG_BATMAN_ADV_DEBUG |
378 | BAT_ATTR_UINT(log_level, S_IRUGO | S_IWUSR, 0, 3, NULL); | 392 | BAT_ATTR_UINT(log_level, S_IRUGO | S_IWUSR, 0, 7, NULL); |
379 | #endif | 393 | #endif |
380 | 394 | ||
381 | static struct bat_attribute *mesh_attrs[] = { | 395 | static struct bat_attribute *mesh_attrs[] = { |
@@ -601,3 +615,60 @@ void sysfs_del_hardif(struct kobject **hardif_obj) | |||
601 | kobject_put(*hardif_obj); | 615 | kobject_put(*hardif_obj); |
602 | *hardif_obj = NULL; | 616 | *hardif_obj = NULL; |
603 | } | 617 | } |
618 | |||
619 | int throw_uevent(struct bat_priv *bat_priv, enum uev_type type, | ||
620 | enum uev_action action, const char *data) | ||
621 | { | ||
622 | int ret = -1; | ||
623 | struct hard_iface *primary_if = NULL; | ||
624 | struct kobject *bat_kobj; | ||
625 | char *uevent_env[4] = { NULL, NULL, NULL, NULL }; | ||
626 | |||
627 | primary_if = primary_if_get_selected(bat_priv); | ||
628 | if (!primary_if) | ||
629 | goto out; | ||
630 | |||
631 | bat_kobj = &primary_if->soft_iface->dev.kobj; | ||
632 | |||
633 | uevent_env[0] = kmalloc(strlen(UEV_TYPE_VAR) + | ||
634 | strlen(uev_type_str[type]) + 1, | ||
635 | GFP_ATOMIC); | ||
636 | if (!uevent_env[0]) | ||
637 | goto out; | ||
638 | |||
639 | sprintf(uevent_env[0], "%s%s", UEV_TYPE_VAR, uev_type_str[type]); | ||
640 | |||
641 | uevent_env[1] = kmalloc(strlen(UEV_ACTION_VAR) + | ||
642 | strlen(uev_action_str[action]) + 1, | ||
643 | GFP_ATOMIC); | ||
644 | if (!uevent_env[1]) | ||
645 | goto out; | ||
646 | |||
647 | sprintf(uevent_env[1], "%s%s", UEV_ACTION_VAR, uev_action_str[action]); | ||
648 | |||
649 | /* If the event is DEL, ignore the data field */ | ||
650 | if (action != UEV_DEL) { | ||
651 | uevent_env[2] = kmalloc(strlen(UEV_DATA_VAR) + | ||
652 | strlen(data) + 1, GFP_ATOMIC); | ||
653 | if (!uevent_env[2]) | ||
654 | goto out; | ||
655 | |||
656 | sprintf(uevent_env[2], "%s%s", UEV_DATA_VAR, data); | ||
657 | } | ||
658 | |||
659 | ret = kobject_uevent_env(bat_kobj, KOBJ_CHANGE, uevent_env); | ||
660 | out: | ||
661 | kfree(uevent_env[0]); | ||
662 | kfree(uevent_env[1]); | ||
663 | kfree(uevent_env[2]); | ||
664 | |||
665 | if (primary_if) | ||
666 | hardif_free_ref(primary_if); | ||
667 | |||
668 | if (ret) | ||
669 | bat_dbg(DBG_BATMAN, bat_priv, "Impossible to send " | ||
670 | "uevent for (%s,%s,%s) event (err: %d)\n", | ||
671 | uev_type_str[type], uev_action_str[action], | ||
672 | (action == UEV_DEL ? "NULL" : data), ret); | ||
673 | return ret; | ||
674 | } | ||
diff --git a/net/batman-adv/bat_sysfs.h b/net/batman-adv/bat_sysfs.h index 02f1fa7aadfa..a3f75a723c56 100644 --- a/net/batman-adv/bat_sysfs.h +++ b/net/batman-adv/bat_sysfs.h | |||
@@ -38,5 +38,7 @@ int sysfs_add_meshif(struct net_device *dev); | |||
38 | void sysfs_del_meshif(struct net_device *dev); | 38 | void sysfs_del_meshif(struct net_device *dev); |
39 | int sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev); | 39 | int sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev); |
40 | void sysfs_del_hardif(struct kobject **hardif_obj); | 40 | void sysfs_del_hardif(struct kobject **hardif_obj); |
41 | int throw_uevent(struct bat_priv *bat_priv, enum uev_type type, | ||
42 | enum uev_action action, const char *data); | ||
41 | 43 | ||
42 | #endif /* _NET_BATMAN_ADV_SYSFS_H_ */ | 44 | #endif /* _NET_BATMAN_ADV_SYSFS_H_ */ |
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c index 700ee4f7a945..c1f4bfc09cc3 100644 --- a/net/batman-adv/bitarray.c +++ b/net/batman-adv/bitarray.c | |||
@@ -26,8 +26,8 @@ | |||
26 | 26 | ||
27 | /* returns true if the corresponding bit in the given seq_bits indicates true | 27 | /* returns true if the corresponding bit in the given seq_bits indicates true |
28 | * and curr_seqno is within range of last_seqno */ | 28 | * and curr_seqno is within range of last_seqno */ |
29 | uint8_t get_bit_status(const unsigned long *seq_bits, uint32_t last_seqno, | 29 | int get_bit_status(const unsigned long *seq_bits, uint32_t last_seqno, |
30 | uint32_t curr_seqno) | 30 | uint32_t curr_seqno) |
31 | { | 31 | { |
32 | int32_t diff, word_offset, word_num; | 32 | int32_t diff, word_offset, word_num; |
33 | 33 | ||
@@ -127,8 +127,8 @@ static void bit_reset_window(unsigned long *seq_bits) | |||
127 | * 1 if the window was moved (either new or very old) | 127 | * 1 if the window was moved (either new or very old) |
128 | * 0 if the window was not moved/shifted. | 128 | * 0 if the window was not moved/shifted. |
129 | */ | 129 | */ |
130 | char bit_get_packet(void *priv, unsigned long *seq_bits, | 130 | int bit_get_packet(void *priv, unsigned long *seq_bits, |
131 | int32_t seq_num_diff, int8_t set_mark) | 131 | int32_t seq_num_diff, int set_mark) |
132 | { | 132 | { |
133 | struct bat_priv *bat_priv = priv; | 133 | struct bat_priv *bat_priv = priv; |
134 | 134 | ||
diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h index e32eb2ddd2d2..9c04422aeb07 100644 --- a/net/batman-adv/bitarray.h +++ b/net/batman-adv/bitarray.h | |||
@@ -26,8 +26,8 @@ | |||
26 | 26 | ||
27 | /* returns true if the corresponding bit in the given seq_bits indicates true | 27 | /* returns true if the corresponding bit in the given seq_bits indicates true |
28 | * and curr_seqno is within range of last_seqno */ | 28 | * and curr_seqno is within range of last_seqno */ |
29 | uint8_t get_bit_status(const unsigned long *seq_bits, uint32_t last_seqno, | 29 | int get_bit_status(const unsigned long *seq_bits, uint32_t last_seqno, |
30 | uint32_t curr_seqno); | 30 | uint32_t curr_seqno); |
31 | 31 | ||
32 | /* turn corresponding bit on, so we can remember that we got the packet */ | 32 | /* turn corresponding bit on, so we can remember that we got the packet */ |
33 | void bit_mark(unsigned long *seq_bits, int32_t n); | 33 | void bit_mark(unsigned long *seq_bits, int32_t n); |
@@ -35,8 +35,8 @@ void bit_mark(unsigned long *seq_bits, int32_t n); | |||
35 | 35 | ||
36 | /* receive and process one packet, returns 1 if received seq_num is considered | 36 | /* receive and process one packet, returns 1 if received seq_num is considered |
37 | * new, 0 if old */ | 37 | * new, 0 if old */ |
38 | char bit_get_packet(void *priv, unsigned long *seq_bits, | 38 | int bit_get_packet(void *priv, unsigned long *seq_bits, |
39 | int32_t seq_num_diff, int8_t set_mark); | 39 | int32_t seq_num_diff, int set_mark); |
40 | 40 | ||
41 | /* count the hamming weight, how many good packets did we receive? */ | 41 | /* count the hamming weight, how many good packets did we receive? */ |
42 | int bit_packet_count(const unsigned long *seq_bits); | 42 | int bit_packet_count(const unsigned long *seq_bits); |
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c index 24aee561f3d8..8b25b52a4764 100644 --- a/net/batman-adv/gateway_client.c +++ b/net/batman-adv/gateway_client.c | |||
@@ -20,15 +20,22 @@ | |||
20 | */ | 20 | */ |
21 | 21 | ||
22 | #include "main.h" | 22 | #include "main.h" |
23 | #include "bat_sysfs.h" | ||
23 | #include "gateway_client.h" | 24 | #include "gateway_client.h" |
24 | #include "gateway_common.h" | 25 | #include "gateway_common.h" |
25 | #include "hard-interface.h" | 26 | #include "hard-interface.h" |
26 | #include "originator.h" | 27 | #include "originator.h" |
28 | #include "routing.h" | ||
27 | #include <linux/ip.h> | 29 | #include <linux/ip.h> |
28 | #include <linux/ipv6.h> | 30 | #include <linux/ipv6.h> |
29 | #include <linux/udp.h> | 31 | #include <linux/udp.h> |
30 | #include <linux/if_vlan.h> | 32 | #include <linux/if_vlan.h> |
31 | 33 | ||
34 | /* This is the offset of the options field in a dhcp packet starting at | ||
35 | * the beginning of the dhcp header */ | ||
36 | #define DHCP_OPTIONS_OFFSET 240 | ||
37 | #define DHCP_REQUEST 3 | ||
38 | |||
32 | static void gw_node_free_ref(struct gw_node *gw_node) | 39 | static void gw_node_free_ref(struct gw_node *gw_node) |
33 | { | 40 | { |
34 | if (atomic_dec_and_test(&gw_node->refcount)) | 41 | if (atomic_dec_and_test(&gw_node->refcount)) |
@@ -97,40 +104,19 @@ static void gw_select(struct bat_priv *bat_priv, struct gw_node *new_gw_node) | |||
97 | 104 | ||
98 | void gw_deselect(struct bat_priv *bat_priv) | 105 | void gw_deselect(struct bat_priv *bat_priv) |
99 | { | 106 | { |
100 | gw_select(bat_priv, NULL); | 107 | atomic_set(&bat_priv->gw_reselect, 1); |
101 | } | 108 | } |
102 | 109 | ||
103 | void gw_election(struct bat_priv *bat_priv) | 110 | static struct gw_node *gw_get_best_gw_node(struct bat_priv *bat_priv) |
104 | { | 111 | { |
105 | struct hlist_node *node; | ||
106 | struct gw_node *gw_node, *curr_gw = NULL, *curr_gw_tmp = NULL; | ||
107 | struct neigh_node *router; | 112 | struct neigh_node *router; |
108 | uint8_t max_tq = 0; | 113 | struct hlist_node *node; |
114 | struct gw_node *gw_node, *curr_gw = NULL; | ||
109 | uint32_t max_gw_factor = 0, tmp_gw_factor = 0; | 115 | uint32_t max_gw_factor = 0, tmp_gw_factor = 0; |
116 | uint8_t max_tq = 0; | ||
110 | int down, up; | 117 | int down, up; |
111 | 118 | ||
112 | /** | ||
113 | * The batman daemon checks here if we already passed a full originator | ||
114 | * cycle in order to make sure we don't choose the first gateway we | ||
115 | * hear about. This check is based on the daemon's uptime which we | ||
116 | * don't have. | ||
117 | **/ | ||
118 | if (atomic_read(&bat_priv->gw_mode) != GW_MODE_CLIENT) | ||
119 | return; | ||
120 | |||
121 | curr_gw = gw_get_selected_gw_node(bat_priv); | ||
122 | if (curr_gw) | ||
123 | goto out; | ||
124 | |||
125 | rcu_read_lock(); | 119 | rcu_read_lock(); |
126 | if (hlist_empty(&bat_priv->gw_list)) { | ||
127 | bat_dbg(DBG_BATMAN, bat_priv, | ||
128 | "Removing selected gateway - " | ||
129 | "no gateway in range\n"); | ||
130 | gw_deselect(bat_priv); | ||
131 | goto unlock; | ||
132 | } | ||
133 | |||
134 | hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) { | 120 | hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) { |
135 | if (gw_node->deleted) | 121 | if (gw_node->deleted) |
136 | continue; | 122 | continue; |
@@ -139,6 +125,9 @@ void gw_election(struct bat_priv *bat_priv) | |||
139 | if (!router) | 125 | if (!router) |
140 | continue; | 126 | continue; |
141 | 127 | ||
128 | if (!atomic_inc_not_zero(&gw_node->refcount)) | ||
129 | goto next; | ||
130 | |||
142 | switch (atomic_read(&bat_priv->gw_sel_class)) { | 131 | switch (atomic_read(&bat_priv->gw_sel_class)) { |
143 | case 1: /* fast connection */ | 132 | case 1: /* fast connection */ |
144 | gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, | 133 | gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, |
@@ -151,8 +140,12 @@ void gw_election(struct bat_priv *bat_priv) | |||
151 | 140 | ||
152 | if ((tmp_gw_factor > max_gw_factor) || | 141 | if ((tmp_gw_factor > max_gw_factor) || |
153 | ((tmp_gw_factor == max_gw_factor) && | 142 | ((tmp_gw_factor == max_gw_factor) && |
154 | (router->tq_avg > max_tq))) | 143 | (router->tq_avg > max_tq))) { |
155 | curr_gw_tmp = gw_node; | 144 | if (curr_gw) |
145 | gw_node_free_ref(curr_gw); | ||
146 | curr_gw = gw_node; | ||
147 | atomic_inc(&curr_gw->refcount); | ||
148 | } | ||
156 | break; | 149 | break; |
157 | 150 | ||
158 | default: /** | 151 | default: /** |
@@ -163,8 +156,12 @@ void gw_election(struct bat_priv *bat_priv) | |||
163 | * soon as a better gateway appears which has | 156 | * soon as a better gateway appears which has |
164 | * $routing_class more tq points) | 157 | * $routing_class more tq points) |
165 | **/ | 158 | **/ |
166 | if (router->tq_avg > max_tq) | 159 | if (router->tq_avg > max_tq) { |
167 | curr_gw_tmp = gw_node; | 160 | if (curr_gw) |
161 | gw_node_free_ref(curr_gw); | ||
162 | curr_gw = gw_node; | ||
163 | atomic_inc(&curr_gw->refcount); | ||
164 | } | ||
168 | break; | 165 | break; |
169 | } | 166 | } |
170 | 167 | ||
@@ -174,42 +171,81 @@ void gw_election(struct bat_priv *bat_priv) | |||
174 | if (tmp_gw_factor > max_gw_factor) | 171 | if (tmp_gw_factor > max_gw_factor) |
175 | max_gw_factor = tmp_gw_factor; | 172 | max_gw_factor = tmp_gw_factor; |
176 | 173 | ||
174 | gw_node_free_ref(gw_node); | ||
175 | |||
176 | next: | ||
177 | neigh_node_free_ref(router); | 177 | neigh_node_free_ref(router); |
178 | } | 178 | } |
179 | rcu_read_unlock(); | ||
179 | 180 | ||
180 | if (curr_gw != curr_gw_tmp) { | 181 | return curr_gw; |
181 | router = orig_node_get_router(curr_gw_tmp->orig_node); | 182 | } |
182 | if (!router) | ||
183 | goto unlock; | ||
184 | 183 | ||
185 | if ((curr_gw) && (!curr_gw_tmp)) | 184 | void gw_election(struct bat_priv *bat_priv) |
186 | bat_dbg(DBG_BATMAN, bat_priv, | 185 | { |
187 | "Removing selected gateway - " | 186 | struct gw_node *curr_gw = NULL, *next_gw = NULL; |
188 | "no gateway in range\n"); | 187 | struct neigh_node *router = NULL; |
189 | else if ((!curr_gw) && (curr_gw_tmp)) | 188 | char gw_addr[18] = { '\0' }; |
190 | bat_dbg(DBG_BATMAN, bat_priv, | ||
191 | "Adding route to gateway %pM " | ||
192 | "(gw_flags: %i, tq: %i)\n", | ||
193 | curr_gw_tmp->orig_node->orig, | ||
194 | curr_gw_tmp->orig_node->gw_flags, | ||
195 | router->tq_avg); | ||
196 | else | ||
197 | bat_dbg(DBG_BATMAN, bat_priv, | ||
198 | "Changing route to gateway %pM " | ||
199 | "(gw_flags: %i, tq: %i)\n", | ||
200 | curr_gw_tmp->orig_node->orig, | ||
201 | curr_gw_tmp->orig_node->gw_flags, | ||
202 | router->tq_avg); | ||
203 | 189 | ||
204 | neigh_node_free_ref(router); | 190 | /** |
205 | gw_select(bat_priv, curr_gw_tmp); | 191 | * The batman daemon checks here if we already passed a full originator |
192 | * cycle in order to make sure we don't choose the first gateway we | ||
193 | * hear about. This check is based on the daemon's uptime which we | ||
194 | * don't have. | ||
195 | **/ | ||
196 | if (atomic_read(&bat_priv->gw_mode) != GW_MODE_CLIENT) | ||
197 | goto out; | ||
198 | |||
199 | if (!atomic_dec_not_zero(&bat_priv->gw_reselect)) | ||
200 | goto out; | ||
201 | |||
202 | curr_gw = gw_get_selected_gw_node(bat_priv); | ||
203 | |||
204 | next_gw = gw_get_best_gw_node(bat_priv); | ||
205 | |||
206 | if (curr_gw == next_gw) | ||
207 | goto out; | ||
208 | |||
209 | if (next_gw) { | ||
210 | sprintf(gw_addr, "%pM", next_gw->orig_node->orig); | ||
211 | |||
212 | router = orig_node_get_router(next_gw->orig_node); | ||
213 | if (!router) { | ||
214 | gw_deselect(bat_priv); | ||
215 | goto out; | ||
216 | } | ||
206 | } | 217 | } |
207 | 218 | ||
208 | unlock: | 219 | if ((curr_gw) && (!next_gw)) { |
209 | rcu_read_unlock(); | 220 | bat_dbg(DBG_BATMAN, bat_priv, |
221 | "Removing selected gateway - no gateway in range\n"); | ||
222 | throw_uevent(bat_priv, UEV_GW, UEV_DEL, NULL); | ||
223 | } else if ((!curr_gw) && (next_gw)) { | ||
224 | bat_dbg(DBG_BATMAN, bat_priv, | ||
225 | "Adding route to gateway %pM (gw_flags: %i, tq: %i)\n", | ||
226 | next_gw->orig_node->orig, | ||
227 | next_gw->orig_node->gw_flags, | ||
228 | router->tq_avg); | ||
229 | throw_uevent(bat_priv, UEV_GW, UEV_ADD, gw_addr); | ||
230 | } else { | ||
231 | bat_dbg(DBG_BATMAN, bat_priv, | ||
232 | "Changing route to gateway %pM " | ||
233 | "(gw_flags: %i, tq: %i)\n", | ||
234 | next_gw->orig_node->orig, | ||
235 | next_gw->orig_node->gw_flags, | ||
236 | router->tq_avg); | ||
237 | throw_uevent(bat_priv, UEV_GW, UEV_CHANGE, gw_addr); | ||
238 | } | ||
239 | |||
240 | gw_select(bat_priv, next_gw); | ||
241 | |||
210 | out: | 242 | out: |
211 | if (curr_gw) | 243 | if (curr_gw) |
212 | gw_node_free_ref(curr_gw); | 244 | gw_node_free_ref(curr_gw); |
245 | if (next_gw) | ||
246 | gw_node_free_ref(next_gw); | ||
247 | if (router) | ||
248 | neigh_node_free_ref(router); | ||
213 | } | 249 | } |
214 | 250 | ||
215 | void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node) | 251 | void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node) |
@@ -360,7 +396,7 @@ void gw_node_purge(struct bat_priv *bat_priv) | |||
360 | struct gw_node *gw_node, *curr_gw; | 396 | struct gw_node *gw_node, *curr_gw; |
361 | struct hlist_node *node, *node_tmp; | 397 | struct hlist_node *node, *node_tmp; |
362 | unsigned long timeout = 2 * PURGE_TIMEOUT * HZ; | 398 | unsigned long timeout = 2 * PURGE_TIMEOUT * HZ; |
363 | char do_deselect = 0; | 399 | int do_deselect = 0; |
364 | 400 | ||
365 | curr_gw = gw_get_selected_gw_node(bat_priv); | 401 | curr_gw = gw_get_selected_gw_node(bat_priv); |
366 | 402 | ||
@@ -479,14 +515,75 @@ out: | |||
479 | return ret; | 515 | return ret; |
480 | } | 516 | } |
481 | 517 | ||
482 | int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb) | 518 | static bool is_type_dhcprequest(struct sk_buff *skb, int header_len) |
519 | { | ||
520 | int ret = false; | ||
521 | unsigned char *p; | ||
522 | int pkt_len; | ||
523 | |||
524 | if (skb_linearize(skb) < 0) | ||
525 | goto out; | ||
526 | |||
527 | pkt_len = skb_headlen(skb); | ||
528 | |||
529 | if (pkt_len < header_len + DHCP_OPTIONS_OFFSET + 1) | ||
530 | goto out; | ||
531 | |||
532 | p = skb->data + header_len + DHCP_OPTIONS_OFFSET; | ||
533 | pkt_len -= header_len + DHCP_OPTIONS_OFFSET + 1; | ||
534 | |||
535 | /* Access the dhcp option lists. Each entry is made up by: | ||
536 | * - octect 1: option type | ||
537 | * - octect 2: option data len (only if type != 255 and 0) | ||
538 | * - octect 3: option data */ | ||
539 | while (*p != 255 && !ret) { | ||
540 | /* p now points to the first octect: option type */ | ||
541 | if (*p == 53) { | ||
542 | /* type 53 is the message type option. | ||
543 | * Jump the len octect and go to the data octect */ | ||
544 | if (pkt_len < 2) | ||
545 | goto out; | ||
546 | p += 2; | ||
547 | |||
548 | /* check if the message type is what we need */ | ||
549 | if (*p == DHCP_REQUEST) | ||
550 | ret = true; | ||
551 | break; | ||
552 | } else if (*p == 0) { | ||
553 | /* option type 0 (padding), just go forward */ | ||
554 | if (pkt_len < 1) | ||
555 | goto out; | ||
556 | pkt_len--; | ||
557 | p++; | ||
558 | } else { | ||
559 | /* This is any other option. So we get the length... */ | ||
560 | if (pkt_len < 1) | ||
561 | goto out; | ||
562 | pkt_len--; | ||
563 | p++; | ||
564 | |||
565 | /* ...and then we jump over the data */ | ||
566 | if (pkt_len < *p) | ||
567 | goto out; | ||
568 | pkt_len -= *p; | ||
569 | p += (*p); | ||
570 | } | ||
571 | } | ||
572 | out: | ||
573 | return ret; | ||
574 | } | ||
575 | |||
576 | int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb, | ||
577 | struct orig_node *old_gw) | ||
483 | { | 578 | { |
484 | struct ethhdr *ethhdr; | 579 | struct ethhdr *ethhdr; |
485 | struct iphdr *iphdr; | 580 | struct iphdr *iphdr; |
486 | struct ipv6hdr *ipv6hdr; | 581 | struct ipv6hdr *ipv6hdr; |
487 | struct udphdr *udphdr; | 582 | struct udphdr *udphdr; |
488 | struct gw_node *curr_gw; | 583 | struct gw_node *curr_gw; |
584 | struct neigh_node *neigh_curr = NULL, *neigh_old = NULL; | ||
489 | unsigned int header_len = 0; | 585 | unsigned int header_len = 0; |
586 | int ret = 1; | ||
490 | 587 | ||
491 | if (atomic_read(&bat_priv->gw_mode) == GW_MODE_OFF) | 588 | if (atomic_read(&bat_priv->gw_mode) == GW_MODE_OFF) |
492 | return 0; | 589 | return 0; |
@@ -554,7 +651,30 @@ int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb) | |||
554 | if (!curr_gw) | 651 | if (!curr_gw) |
555 | return 0; | 652 | return 0; |
556 | 653 | ||
654 | /* If old_gw != NULL then this packet is unicast. | ||
655 | * So, at this point we have to check the message type: if it is a | ||
656 | * DHCPREQUEST we have to decide whether to drop it or not */ | ||
657 | if (old_gw && curr_gw->orig_node != old_gw) { | ||
658 | if (is_type_dhcprequest(skb, header_len)) { | ||
659 | /* If the dhcp packet has been sent to a different gw, | ||
660 | * we have to evaluate whether the old gw is still | ||
661 | * reliable enough */ | ||
662 | neigh_curr = find_router(bat_priv, curr_gw->orig_node, | ||
663 | NULL); | ||
664 | neigh_old = find_router(bat_priv, old_gw, NULL); | ||
665 | if (!neigh_curr || !neigh_old) | ||
666 | goto free_neigh; | ||
667 | if (neigh_curr->tq_avg - neigh_old->tq_avg < | ||
668 | GW_THRESHOLD) | ||
669 | ret = -1; | ||
670 | } | ||
671 | } | ||
672 | free_neigh: | ||
673 | if (neigh_old) | ||
674 | neigh_node_free_ref(neigh_old); | ||
675 | if (neigh_curr) | ||
676 | neigh_node_free_ref(neigh_curr); | ||
557 | if (curr_gw) | 677 | if (curr_gw) |
558 | gw_node_free_ref(curr_gw); | 678 | gw_node_free_ref(curr_gw); |
559 | return 1; | 679 | return ret; |
560 | } | 680 | } |
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h index 1ce8c6066da1..b9b983c07feb 100644 --- a/net/batman-adv/gateway_client.h +++ b/net/batman-adv/gateway_client.h | |||
@@ -31,6 +31,7 @@ void gw_node_update(struct bat_priv *bat_priv, | |||
31 | void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node); | 31 | void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node); |
32 | void gw_node_purge(struct bat_priv *bat_priv); | 32 | void gw_node_purge(struct bat_priv *bat_priv); |
33 | int gw_client_seq_print_text(struct seq_file *seq, void *offset); | 33 | int gw_client_seq_print_text(struct seq_file *seq, void *offset); |
34 | int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb); | 34 | int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb, |
35 | struct orig_node *old_gw); | ||
35 | 36 | ||
36 | #endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */ | 37 | #endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */ |
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c index e74307be8e0c..18661af0bc3b 100644 --- a/net/batman-adv/gateway_common.c +++ b/net/batman-adv/gateway_common.c | |||
@@ -61,9 +61,9 @@ static void kbit_to_gw_bandwidth(int down, int up, long *gw_srv_class) | |||
61 | /* returns the up and downspeeds in kbit, calculated from the class */ | 61 | /* returns the up and downspeeds in kbit, calculated from the class */ |
62 | void gw_bandwidth_to_kbit(uint8_t gw_srv_class, int *down, int *up) | 62 | void gw_bandwidth_to_kbit(uint8_t gw_srv_class, int *down, int *up) |
63 | { | 63 | { |
64 | char sbit = (gw_srv_class & 0x80) >> 7; | 64 | int sbit = (gw_srv_class & 0x80) >> 7; |
65 | char dpart = (gw_srv_class & 0x78) >> 3; | 65 | int dpart = (gw_srv_class & 0x78) >> 3; |
66 | char upart = (gw_srv_class & 0x07); | 66 | int upart = (gw_srv_class & 0x07); |
67 | 67 | ||
68 | if (!gw_srv_class) { | 68 | if (!gw_srv_class) { |
69 | *down = 0; | 69 | *down = 0; |
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index abb490106f3b..db7aacf1e095 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c | |||
@@ -152,12 +152,6 @@ static void primary_if_select(struct bat_priv *bat_priv, | |||
152 | batman_packet->ttl = TTL; | 152 | batman_packet->ttl = TTL; |
153 | 153 | ||
154 | primary_if_update_addr(bat_priv); | 154 | primary_if_update_addr(bat_priv); |
155 | |||
156 | /*** | ||
157 | * hacky trick to make sure that we send the TT information via | ||
158 | * our new primary interface | ||
159 | */ | ||
160 | atomic_set(&bat_priv->tt_local_changed, 1); | ||
161 | } | 155 | } |
162 | 156 | ||
163 | static bool hardif_is_iface_up(const struct hard_iface *hard_iface) | 157 | static bool hardif_is_iface_up(const struct hard_iface *hard_iface) |
@@ -340,7 +334,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface, | |||
340 | batman_packet->flags = NO_FLAGS; | 334 | batman_packet->flags = NO_FLAGS; |
341 | batman_packet->ttl = 2; | 335 | batman_packet->ttl = 2; |
342 | batman_packet->tq = TQ_MAX_VALUE; | 336 | batman_packet->tq = TQ_MAX_VALUE; |
343 | batman_packet->num_tt = 0; | 337 | batman_packet->tt_num_changes = 0; |
338 | batman_packet->ttvn = 0; | ||
344 | 339 | ||
345 | hard_iface->if_num = bat_priv->num_ifaces; | 340 | hard_iface->if_num = bat_priv->num_ifaces; |
346 | bat_priv->num_ifaces++; | 341 | bat_priv->num_ifaces++; |
@@ -659,6 +654,14 @@ static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, | |||
659 | case BAT_VIS: | 654 | case BAT_VIS: |
660 | ret = recv_vis_packet(skb, hard_iface); | 655 | ret = recv_vis_packet(skb, hard_iface); |
661 | break; | 656 | break; |
657 | /* Translation table query (request or response) */ | ||
658 | case BAT_TT_QUERY: | ||
659 | ret = recv_tt_query(skb, hard_iface); | ||
660 | break; | ||
661 | /* Roaming advertisement */ | ||
662 | case BAT_ROAM_ADV: | ||
663 | ret = recv_roam_adv(skb, hard_iface); | ||
664 | break; | ||
662 | default: | 665 | default: |
663 | ret = NET_RX_DROP; | 666 | ret = NET_RX_DROP; |
664 | } | 667 | } |
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index 2d6445e171d6..e367e690a9f6 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c | |||
@@ -84,8 +84,10 @@ int mesh_init(struct net_device *soft_iface) | |||
84 | 84 | ||
85 | spin_lock_init(&bat_priv->forw_bat_list_lock); | 85 | spin_lock_init(&bat_priv->forw_bat_list_lock); |
86 | spin_lock_init(&bat_priv->forw_bcast_list_lock); | 86 | spin_lock_init(&bat_priv->forw_bcast_list_lock); |
87 | spin_lock_init(&bat_priv->tt_lhash_lock); | 87 | spin_lock_init(&bat_priv->tt_changes_list_lock); |
88 | spin_lock_init(&bat_priv->tt_ghash_lock); | 88 | spin_lock_init(&bat_priv->tt_req_list_lock); |
89 | spin_lock_init(&bat_priv->tt_roam_list_lock); | ||
90 | spin_lock_init(&bat_priv->tt_buff_lock); | ||
89 | spin_lock_init(&bat_priv->gw_list_lock); | 91 | spin_lock_init(&bat_priv->gw_list_lock); |
90 | spin_lock_init(&bat_priv->vis_hash_lock); | 92 | spin_lock_init(&bat_priv->vis_hash_lock); |
91 | spin_lock_init(&bat_priv->vis_list_lock); | 93 | spin_lock_init(&bat_priv->vis_list_lock); |
@@ -96,14 +98,14 @@ int mesh_init(struct net_device *soft_iface) | |||
96 | INIT_HLIST_HEAD(&bat_priv->forw_bcast_list); | 98 | INIT_HLIST_HEAD(&bat_priv->forw_bcast_list); |
97 | INIT_HLIST_HEAD(&bat_priv->gw_list); | 99 | INIT_HLIST_HEAD(&bat_priv->gw_list); |
98 | INIT_HLIST_HEAD(&bat_priv->softif_neigh_vids); | 100 | INIT_HLIST_HEAD(&bat_priv->softif_neigh_vids); |
101 | INIT_LIST_HEAD(&bat_priv->tt_changes_list); | ||
102 | INIT_LIST_HEAD(&bat_priv->tt_req_list); | ||
103 | INIT_LIST_HEAD(&bat_priv->tt_roam_list); | ||
99 | 104 | ||
100 | if (originator_init(bat_priv) < 1) | 105 | if (originator_init(bat_priv) < 1) |
101 | goto err; | 106 | goto err; |
102 | 107 | ||
103 | if (tt_local_init(bat_priv) < 1) | 108 | if (tt_init(bat_priv) < 1) |
104 | goto err; | ||
105 | |||
106 | if (tt_global_init(bat_priv) < 1) | ||
107 | goto err; | 109 | goto err; |
108 | 110 | ||
109 | tt_local_add(soft_iface, soft_iface->dev_addr); | 111 | tt_local_add(soft_iface, soft_iface->dev_addr); |
@@ -111,6 +113,7 @@ int mesh_init(struct net_device *soft_iface) | |||
111 | if (vis_init(bat_priv) < 1) | 113 | if (vis_init(bat_priv) < 1) |
112 | goto err; | 114 | goto err; |
113 | 115 | ||
116 | atomic_set(&bat_priv->gw_reselect, 0); | ||
114 | atomic_set(&bat_priv->mesh_state, MESH_ACTIVE); | 117 | atomic_set(&bat_priv->mesh_state, MESH_ACTIVE); |
115 | goto end; | 118 | goto end; |
116 | 119 | ||
@@ -137,8 +140,7 @@ void mesh_free(struct net_device *soft_iface) | |||
137 | gw_node_purge(bat_priv); | 140 | gw_node_purge(bat_priv); |
138 | originator_free(bat_priv); | 141 | originator_free(bat_priv); |
139 | 142 | ||
140 | tt_local_free(bat_priv); | 143 | tt_free(bat_priv); |
141 | tt_global_free(bat_priv); | ||
142 | 144 | ||
143 | softif_neigh_purge(bat_priv); | 145 | softif_neigh_purge(bat_priv); |
144 | 146 | ||
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index ed488cbae80f..4f293b594475 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h | |||
@@ -42,15 +42,23 @@ | |||
42 | * -> TODO: check influence on TQ_LOCAL_WINDOW_SIZE */ | 42 | * -> TODO: check influence on TQ_LOCAL_WINDOW_SIZE */ |
43 | #define PURGE_TIMEOUT 200 | 43 | #define PURGE_TIMEOUT 200 |
44 | #define TT_LOCAL_TIMEOUT 3600 /* in seconds */ | 44 | #define TT_LOCAL_TIMEOUT 3600 /* in seconds */ |
45 | 45 | #define TT_CLIENT_ROAM_TIMEOUT 600 | |
46 | /* sliding packet range of received originator messages in squence numbers | 46 | /* sliding packet range of received originator messages in squence numbers |
47 | * (should be a multiple of our word size) */ | 47 | * (should be a multiple of our word size) */ |
48 | #define TQ_LOCAL_WINDOW_SIZE 64 | 48 | #define TQ_LOCAL_WINDOW_SIZE 64 |
49 | #define TT_REQUEST_TIMEOUT 3 /* seconds we have to keep pending tt_req */ | ||
50 | |||
49 | #define TQ_GLOBAL_WINDOW_SIZE 5 | 51 | #define TQ_GLOBAL_WINDOW_SIZE 5 |
50 | #define TQ_LOCAL_BIDRECT_SEND_MINIMUM 1 | 52 | #define TQ_LOCAL_BIDRECT_SEND_MINIMUM 1 |
51 | #define TQ_LOCAL_BIDRECT_RECV_MINIMUM 1 | 53 | #define TQ_LOCAL_BIDRECT_RECV_MINIMUM 1 |
52 | #define TQ_TOTAL_BIDRECT_LIMIT 1 | 54 | #define TQ_TOTAL_BIDRECT_LIMIT 1 |
53 | 55 | ||
56 | #define TT_OGM_APPEND_MAX 3 /* number of OGMs sent with the last tt diff */ | ||
57 | |||
58 | #define ROAMING_MAX_TIME 20 /* Time in which a client can roam at most | ||
59 | * ROAMING_MAX_COUNT times */ | ||
60 | #define ROAMING_MAX_COUNT 5 | ||
61 | |||
54 | #define NO_FLAGS 0 | 62 | #define NO_FLAGS 0 |
55 | 63 | ||
56 | #define NUM_WORDS (TQ_LOCAL_WINDOW_SIZE / WORD_BIT_SIZE) | 64 | #define NUM_WORDS (TQ_LOCAL_WINDOW_SIZE / WORD_BIT_SIZE) |
@@ -83,6 +91,18 @@ enum mesh_state { | |||
83 | #define BCAST_QUEUE_LEN 256 | 91 | #define BCAST_QUEUE_LEN 256 |
84 | #define BATMAN_QUEUE_LEN 256 | 92 | #define BATMAN_QUEUE_LEN 256 |
85 | 93 | ||
94 | enum uev_action { | ||
95 | UEV_ADD = 0, | ||
96 | UEV_DEL, | ||
97 | UEV_CHANGE | ||
98 | }; | ||
99 | |||
100 | enum uev_type { | ||
101 | UEV_GW = 0 | ||
102 | }; | ||
103 | |||
104 | #define GW_THRESHOLD 50 | ||
105 | |||
86 | /* | 106 | /* |
87 | * Debug Messages | 107 | * Debug Messages |
88 | */ | 108 | */ |
@@ -96,7 +116,8 @@ enum mesh_state { | |||
96 | enum dbg_level { | 116 | enum dbg_level { |
97 | DBG_BATMAN = 1 << 0, | 117 | DBG_BATMAN = 1 << 0, |
98 | DBG_ROUTES = 1 << 1, /* route added / changed / deleted */ | 118 | DBG_ROUTES = 1 << 1, /* route added / changed / deleted */ |
99 | DBG_ALL = 3 | 119 | DBG_TT = 1 << 2, /* translation table operations */ |
120 | DBG_ALL = 7 | ||
100 | }; | 121 | }; |
101 | 122 | ||
102 | 123 | ||
@@ -151,7 +172,7 @@ int debug_log(struct bat_priv *bat_priv, const char *fmt, ...) __printf(2, 3); | |||
151 | while (0) | 172 | while (0) |
152 | #else /* !CONFIG_BATMAN_ADV_DEBUG */ | 173 | #else /* !CONFIG_BATMAN_ADV_DEBUG */ |
153 | __printf(3, 4) | 174 | __printf(3, 4) |
154 | static inline void bat_dbg(char type __always_unused, | 175 | static inline void bat_dbg(int type __always_unused, |
155 | struct bat_priv *bat_priv __always_unused, | 176 | struct bat_priv *bat_priv __always_unused, |
156 | const char *fmt __always_unused, ...) | 177 | const char *fmt __always_unused, ...) |
157 | { | 178 | { |
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index a6c35d4e332b..338b3c597e4a 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c | |||
@@ -37,6 +37,14 @@ static void start_purge_timer(struct bat_priv *bat_priv) | |||
37 | queue_delayed_work(bat_event_workqueue, &bat_priv->orig_work, 1 * HZ); | 37 | queue_delayed_work(bat_event_workqueue, &bat_priv->orig_work, 1 * HZ); |
38 | } | 38 | } |
39 | 39 | ||
40 | /* returns 1 if they are the same originator */ | ||
41 | static int compare_orig(const struct hlist_node *node, const void *data2) | ||
42 | { | ||
43 | const void *data1 = container_of(node, struct orig_node, hash_entry); | ||
44 | |||
45 | return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); | ||
46 | } | ||
47 | |||
40 | int originator_init(struct bat_priv *bat_priv) | 48 | int originator_init(struct bat_priv *bat_priv) |
41 | { | 49 | { |
42 | if (bat_priv->orig_hash) | 50 | if (bat_priv->orig_hash) |
@@ -137,6 +145,7 @@ static void orig_node_free_rcu(struct rcu_head *rcu) | |||
137 | tt_global_del_orig(orig_node->bat_priv, orig_node, | 145 | tt_global_del_orig(orig_node->bat_priv, orig_node, |
138 | "originator timed out"); | 146 | "originator timed out"); |
139 | 147 | ||
148 | kfree(orig_node->tt_buff); | ||
140 | kfree(orig_node->bcast_own); | 149 | kfree(orig_node->bcast_own); |
141 | kfree(orig_node->bcast_own_sum); | 150 | kfree(orig_node->bcast_own_sum); |
142 | kfree(orig_node); | 151 | kfree(orig_node); |
@@ -205,14 +214,18 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr) | |||
205 | spin_lock_init(&orig_node->ogm_cnt_lock); | 214 | spin_lock_init(&orig_node->ogm_cnt_lock); |
206 | spin_lock_init(&orig_node->bcast_seqno_lock); | 215 | spin_lock_init(&orig_node->bcast_seqno_lock); |
207 | spin_lock_init(&orig_node->neigh_list_lock); | 216 | spin_lock_init(&orig_node->neigh_list_lock); |
217 | spin_lock_init(&orig_node->tt_buff_lock); | ||
208 | 218 | ||
209 | /* extra reference for return */ | 219 | /* extra reference for return */ |
210 | atomic_set(&orig_node->refcount, 2); | 220 | atomic_set(&orig_node->refcount, 2); |
211 | 221 | ||
222 | orig_node->tt_poss_change = false; | ||
212 | orig_node->bat_priv = bat_priv; | 223 | orig_node->bat_priv = bat_priv; |
213 | memcpy(orig_node->orig, addr, ETH_ALEN); | 224 | memcpy(orig_node->orig, addr, ETH_ALEN); |
214 | orig_node->router = NULL; | 225 | orig_node->router = NULL; |
215 | orig_node->tt_buff = NULL; | 226 | orig_node->tt_buff = NULL; |
227 | orig_node->tt_buff_len = 0; | ||
228 | atomic_set(&orig_node->tt_size, 0); | ||
216 | orig_node->bcast_seqno_reset = jiffies - 1 | 229 | orig_node->bcast_seqno_reset = jiffies - 1 |
217 | - msecs_to_jiffies(RESET_PROTECTION_MS); | 230 | - msecs_to_jiffies(RESET_PROTECTION_MS); |
218 | orig_node->batman_seqno_reset = jiffies - 1 | 231 | orig_node->batman_seqno_reset = jiffies - 1 |
@@ -322,9 +335,7 @@ static bool purge_orig_node(struct bat_priv *bat_priv, | |||
322 | if (purge_orig_neighbors(bat_priv, orig_node, | 335 | if (purge_orig_neighbors(bat_priv, orig_node, |
323 | &best_neigh_node)) { | 336 | &best_neigh_node)) { |
324 | update_routes(bat_priv, orig_node, | 337 | update_routes(bat_priv, orig_node, |
325 | best_neigh_node, | 338 | best_neigh_node); |
326 | orig_node->tt_buff, | ||
327 | orig_node->tt_buff_len); | ||
328 | } | 339 | } |
329 | } | 340 | } |
330 | 341 | ||
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h index 8e307af7aa0d..cfc1f60a96a1 100644 --- a/net/batman-adv/originator.h +++ b/net/batman-adv/originator.h | |||
@@ -40,14 +40,6 @@ int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num); | |||
40 | int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num); | 40 | int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num); |
41 | 41 | ||
42 | 42 | ||
43 | /* returns 1 if they are the same originator */ | ||
44 | static inline int compare_orig(const struct hlist_node *node, const void *data2) | ||
45 | { | ||
46 | const void *data1 = container_of(node, struct orig_node, hash_entry); | ||
47 | |||
48 | return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); | ||
49 | } | ||
50 | |||
51 | /* hashfunction to choose an entry in a hash table of given size */ | 43 | /* hashfunction to choose an entry in a hash table of given size */ |
52 | /* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */ | 44 | /* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */ |
53 | static inline int choose_orig(const void *data, int32_t size) | 45 | static inline int choose_orig(const void *data, int32_t size) |
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h index 9f77086a5461..c5f081dfc6d1 100644 --- a/net/batman-adv/packet.h +++ b/net/batman-adv/packet.h | |||
@@ -30,11 +30,13 @@ enum bat_packettype { | |||
30 | BAT_UNICAST = 0x03, | 30 | BAT_UNICAST = 0x03, |
31 | BAT_BCAST = 0x04, | 31 | BAT_BCAST = 0x04, |
32 | BAT_VIS = 0x05, | 32 | BAT_VIS = 0x05, |
33 | BAT_UNICAST_FRAG = 0x06 | 33 | BAT_UNICAST_FRAG = 0x06, |
34 | BAT_TT_QUERY = 0x07, | ||
35 | BAT_ROAM_ADV = 0x08 | ||
34 | }; | 36 | }; |
35 | 37 | ||
36 | /* this file is included by batctl which needs these defines */ | 38 | /* this file is included by batctl which needs these defines */ |
37 | #define COMPAT_VERSION 12 | 39 | #define COMPAT_VERSION 14 |
38 | 40 | ||
39 | enum batman_flags { | 41 | enum batman_flags { |
40 | PRIMARIES_FIRST_HOP = 1 << 4, | 42 | PRIMARIES_FIRST_HOP = 1 << 4, |
@@ -63,18 +65,38 @@ enum unicast_frag_flags { | |||
63 | UNI_FRAG_LARGETAIL = 1 << 1 | 65 | UNI_FRAG_LARGETAIL = 1 << 1 |
64 | }; | 66 | }; |
65 | 67 | ||
68 | /* TT_QUERY subtypes */ | ||
69 | #define TT_QUERY_TYPE_MASK 0x3 | ||
70 | |||
71 | enum tt_query_packettype { | ||
72 | TT_REQUEST = 0, | ||
73 | TT_RESPONSE = 1 | ||
74 | }; | ||
75 | |||
76 | /* TT_QUERY flags */ | ||
77 | enum tt_query_flags { | ||
78 | TT_FULL_TABLE = 1 << 2 | ||
79 | }; | ||
80 | |||
81 | /* TT_CHANGE flags */ | ||
82 | enum tt_change_flags { | ||
83 | TT_CHANGE_DEL = 0x01, | ||
84 | TT_CLIENT_ROAM = 0x02 | ||
85 | }; | ||
86 | |||
66 | struct batman_packet { | 87 | struct batman_packet { |
67 | uint8_t packet_type; | 88 | uint8_t packet_type; |
68 | uint8_t version; /* batman version field */ | 89 | uint8_t version; /* batman version field */ |
90 | uint8_t ttl; | ||
69 | uint8_t flags; /* 0x40: DIRECTLINK flag, 0x20 VIS_SERVER flag... */ | 91 | uint8_t flags; /* 0x40: DIRECTLINK flag, 0x20 VIS_SERVER flag... */ |
70 | uint8_t tq; | ||
71 | uint32_t seqno; | 92 | uint32_t seqno; |
72 | uint8_t orig[6]; | 93 | uint8_t orig[6]; |
73 | uint8_t prev_sender[6]; | 94 | uint8_t prev_sender[6]; |
74 | uint8_t ttl; | ||
75 | uint8_t num_tt; | ||
76 | uint8_t gw_flags; /* flags related to gateway class */ | 95 | uint8_t gw_flags; /* flags related to gateway class */ |
77 | uint8_t align; | 96 | uint8_t tq; |
97 | uint8_t tt_num_changes; | ||
98 | uint8_t ttvn; /* translation table version number */ | ||
99 | uint16_t tt_crc; | ||
78 | } __packed; | 100 | } __packed; |
79 | 101 | ||
80 | #define BAT_PACKET_LEN sizeof(struct batman_packet) | 102 | #define BAT_PACKET_LEN sizeof(struct batman_packet) |
@@ -82,12 +104,13 @@ struct batman_packet { | |||
82 | struct icmp_packet { | 104 | struct icmp_packet { |
83 | uint8_t packet_type; | 105 | uint8_t packet_type; |
84 | uint8_t version; /* batman version field */ | 106 | uint8_t version; /* batman version field */ |
85 | uint8_t msg_type; /* see ICMP message types above */ | ||
86 | uint8_t ttl; | 107 | uint8_t ttl; |
108 | uint8_t msg_type; /* see ICMP message types above */ | ||
87 | uint8_t dst[6]; | 109 | uint8_t dst[6]; |
88 | uint8_t orig[6]; | 110 | uint8_t orig[6]; |
89 | uint16_t seqno; | 111 | uint16_t seqno; |
90 | uint8_t uid; | 112 | uint8_t uid; |
113 | uint8_t reserved; | ||
91 | } __packed; | 114 | } __packed; |
92 | 115 | ||
93 | #define BAT_RR_LEN 16 | 116 | #define BAT_RR_LEN 16 |
@@ -97,8 +120,8 @@ struct icmp_packet { | |||
97 | struct icmp_packet_rr { | 120 | struct icmp_packet_rr { |
98 | uint8_t packet_type; | 121 | uint8_t packet_type; |
99 | uint8_t version; /* batman version field */ | 122 | uint8_t version; /* batman version field */ |
100 | uint8_t msg_type; /* see ICMP message types above */ | ||
101 | uint8_t ttl; | 123 | uint8_t ttl; |
124 | uint8_t msg_type; /* see ICMP message types above */ | ||
102 | uint8_t dst[6]; | 125 | uint8_t dst[6]; |
103 | uint8_t orig[6]; | 126 | uint8_t orig[6]; |
104 | uint16_t seqno; | 127 | uint16_t seqno; |
@@ -110,16 +133,19 @@ struct icmp_packet_rr { | |||
110 | struct unicast_packet { | 133 | struct unicast_packet { |
111 | uint8_t packet_type; | 134 | uint8_t packet_type; |
112 | uint8_t version; /* batman version field */ | 135 | uint8_t version; /* batman version field */ |
113 | uint8_t dest[6]; | ||
114 | uint8_t ttl; | 136 | uint8_t ttl; |
137 | uint8_t ttvn; /* destination translation table version number */ | ||
138 | uint8_t dest[6]; | ||
115 | } __packed; | 139 | } __packed; |
116 | 140 | ||
117 | struct unicast_frag_packet { | 141 | struct unicast_frag_packet { |
118 | uint8_t packet_type; | 142 | uint8_t packet_type; |
119 | uint8_t version; /* batman version field */ | 143 | uint8_t version; /* batman version field */ |
120 | uint8_t dest[6]; | ||
121 | uint8_t ttl; | 144 | uint8_t ttl; |
145 | uint8_t ttvn; /* destination translation table version number */ | ||
146 | uint8_t dest[6]; | ||
122 | uint8_t flags; | 147 | uint8_t flags; |
148 | uint8_t align; | ||
123 | uint8_t orig[6]; | 149 | uint8_t orig[6]; |
124 | uint16_t seqno; | 150 | uint16_t seqno; |
125 | } __packed; | 151 | } __packed; |
@@ -127,21 +153,61 @@ struct unicast_frag_packet { | |||
127 | struct bcast_packet { | 153 | struct bcast_packet { |
128 | uint8_t packet_type; | 154 | uint8_t packet_type; |
129 | uint8_t version; /* batman version field */ | 155 | uint8_t version; /* batman version field */ |
130 | uint8_t orig[6]; | ||
131 | uint8_t ttl; | 156 | uint8_t ttl; |
157 | uint8_t reserved; | ||
132 | uint32_t seqno; | 158 | uint32_t seqno; |
159 | uint8_t orig[6]; | ||
133 | } __packed; | 160 | } __packed; |
134 | 161 | ||
135 | struct vis_packet { | 162 | struct vis_packet { |
136 | uint8_t packet_type; | 163 | uint8_t packet_type; |
137 | uint8_t version; /* batman version field */ | 164 | uint8_t version; /* batman version field */ |
165 | uint8_t ttl; /* TTL */ | ||
138 | uint8_t vis_type; /* which type of vis-participant sent this? */ | 166 | uint8_t vis_type; /* which type of vis-participant sent this? */ |
139 | uint8_t entries; /* number of entries behind this struct */ | ||
140 | uint32_t seqno; /* sequence number */ | 167 | uint32_t seqno; /* sequence number */ |
141 | uint8_t ttl; /* TTL */ | 168 | uint8_t entries; /* number of entries behind this struct */ |
169 | uint8_t reserved; | ||
142 | uint8_t vis_orig[6]; /* originator that announces its neighbors */ | 170 | uint8_t vis_orig[6]; /* originator that announces its neighbors */ |
143 | uint8_t target_orig[6]; /* who should receive this packet */ | 171 | uint8_t target_orig[6]; /* who should receive this packet */ |
144 | uint8_t sender_orig[6]; /* who sent or rebroadcasted this packet */ | 172 | uint8_t sender_orig[6]; /* who sent or rebroadcasted this packet */ |
145 | } __packed; | 173 | } __packed; |
146 | 174 | ||
175 | struct tt_query_packet { | ||
176 | uint8_t packet_type; | ||
177 | uint8_t version; /* batman version field */ | ||
178 | uint8_t ttl; | ||
179 | /* the flag field is a combination of: | ||
180 | * - TT_REQUEST or TT_RESPONSE | ||
181 | * - TT_FULL_TABLE */ | ||
182 | uint8_t flags; | ||
183 | uint8_t dst[ETH_ALEN]; | ||
184 | uint8_t src[ETH_ALEN]; | ||
185 | /* the ttvn field is: | ||
186 | * if TT_REQUEST: ttvn that triggered the | ||
187 | * request | ||
188 | * if TT_RESPONSE: new ttvn for the src | ||
189 | * orig_node */ | ||
190 | uint8_t ttvn; | ||
191 | /* tt_data field is: | ||
192 | * if TT_REQUEST: crc associated with the | ||
193 | * ttvn | ||
194 | * if TT_RESPONSE: table_size */ | ||
195 | uint16_t tt_data; | ||
196 | } __packed; | ||
197 | |||
198 | struct roam_adv_packet { | ||
199 | uint8_t packet_type; | ||
200 | uint8_t version; | ||
201 | uint8_t ttl; | ||
202 | uint8_t reserved; | ||
203 | uint8_t dst[ETH_ALEN]; | ||
204 | uint8_t src[ETH_ALEN]; | ||
205 | uint8_t client[ETH_ALEN]; | ||
206 | } __packed; | ||
207 | |||
208 | struct tt_change { | ||
209 | uint8_t flags; | ||
210 | uint8_t addr[ETH_ALEN]; | ||
211 | } __packed; | ||
212 | |||
147 | #endif /* _NET_BATMAN_ADV_PACKET_H_ */ | 213 | #endif /* _NET_BATMAN_ADV_PACKET_H_ */ |
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index 934f1f2f86c6..0ce090c9fe86 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c | |||
@@ -64,27 +64,57 @@ void slide_own_bcast_window(struct hard_iface *hard_iface) | |||
64 | } | 64 | } |
65 | } | 65 | } |
66 | 66 | ||
67 | static void update_TT(struct bat_priv *bat_priv, struct orig_node *orig_node, | 67 | static void update_transtable(struct bat_priv *bat_priv, |
68 | const unsigned char *tt_buff, int tt_buff_len) | 68 | struct orig_node *orig_node, |
69 | const unsigned char *tt_buff, | ||
70 | uint8_t tt_num_changes, uint8_t ttvn, | ||
71 | uint16_t tt_crc) | ||
69 | { | 72 | { |
70 | if ((tt_buff_len != orig_node->tt_buff_len) || | 73 | uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn); |
71 | ((tt_buff_len > 0) && | 74 | bool full_table = true; |
72 | (orig_node->tt_buff_len > 0) && | 75 | |
73 | (memcmp(orig_node->tt_buff, tt_buff, tt_buff_len) != 0))) { | 76 | /* the ttvn increased by one -> we can apply the attached changes */ |
74 | 77 | if (ttvn - orig_ttvn == 1) { | |
75 | if (orig_node->tt_buff_len > 0) | 78 | /* the OGM could not contain the changes because they were too |
76 | tt_global_del_orig(bat_priv, orig_node, | 79 | * many to fit in one frame or because they have already been |
77 | "originator changed tt"); | 80 | * sent TT_OGM_APPEND_MAX times. In this case send a tt |
78 | 81 | * request */ | |
79 | if ((tt_buff_len > 0) && (tt_buff)) | 82 | if (!tt_num_changes) { |
80 | tt_global_add_orig(bat_priv, orig_node, | 83 | full_table = false; |
81 | tt_buff, tt_buff_len); | 84 | goto request_table; |
85 | } | ||
86 | |||
87 | tt_update_changes(bat_priv, orig_node, tt_num_changes, ttvn, | ||
88 | (struct tt_change *)tt_buff); | ||
89 | |||
90 | /* Even if we received the crc into the OGM, we prefer | ||
91 | * to recompute it to spot any possible inconsistency | ||
92 | * in the global table */ | ||
93 | orig_node->tt_crc = tt_global_crc(bat_priv, orig_node); | ||
94 | /* Roaming phase is over: tables are in sync again. I can | ||
95 | * unset the flag */ | ||
96 | orig_node->tt_poss_change = false; | ||
97 | } else { | ||
98 | /* if we missed more than one change or our tables are not | ||
99 | * in sync anymore -> request fresh tt data */ | ||
100 | if (ttvn != orig_ttvn || orig_node->tt_crc != tt_crc) { | ||
101 | request_table: | ||
102 | bat_dbg(DBG_TT, bat_priv, "TT inconsistency for %pM. " | ||
103 | "Need to retrieve the correct information " | ||
104 | "(ttvn: %u last_ttvn: %u crc: %u last_crc: " | ||
105 | "%u num_changes: %u)\n", orig_node->orig, ttvn, | ||
106 | orig_ttvn, tt_crc, orig_node->tt_crc, | ||
107 | tt_num_changes); | ||
108 | send_tt_request(bat_priv, orig_node, ttvn, tt_crc, | ||
109 | full_table); | ||
110 | return; | ||
111 | } | ||
82 | } | 112 | } |
83 | } | 113 | } |
84 | 114 | ||
85 | static void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node, | 115 | static void update_route(struct bat_priv *bat_priv, |
86 | struct neigh_node *neigh_node, | 116 | struct orig_node *orig_node, |
87 | const unsigned char *tt_buff, int tt_buff_len) | 117 | struct neigh_node *neigh_node) |
88 | { | 118 | { |
89 | struct neigh_node *curr_router; | 119 | struct neigh_node *curr_router; |
90 | 120 | ||
@@ -92,11 +122,10 @@ static void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node, | |||
92 | 122 | ||
93 | /* route deleted */ | 123 | /* route deleted */ |
94 | if ((curr_router) && (!neigh_node)) { | 124 | if ((curr_router) && (!neigh_node)) { |
95 | |||
96 | bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n", | 125 | bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n", |
97 | orig_node->orig); | 126 | orig_node->orig); |
98 | tt_global_del_orig(bat_priv, orig_node, | 127 | tt_global_del_orig(bat_priv, orig_node, |
99 | "originator timed out"); | 128 | "Deleted route towards originator"); |
100 | 129 | ||
101 | /* route added */ | 130 | /* route added */ |
102 | } else if ((!curr_router) && (neigh_node)) { | 131 | } else if ((!curr_router) && (neigh_node)) { |
@@ -104,9 +133,6 @@ static void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node, | |||
104 | bat_dbg(DBG_ROUTES, bat_priv, | 133 | bat_dbg(DBG_ROUTES, bat_priv, |
105 | "Adding route towards: %pM (via %pM)\n", | 134 | "Adding route towards: %pM (via %pM)\n", |
106 | orig_node->orig, neigh_node->addr); | 135 | orig_node->orig, neigh_node->addr); |
107 | tt_global_add_orig(bat_priv, orig_node, | ||
108 | tt_buff, tt_buff_len); | ||
109 | |||
110 | /* route changed */ | 136 | /* route changed */ |
111 | } else if (neigh_node && curr_router) { | 137 | } else if (neigh_node && curr_router) { |
112 | bat_dbg(DBG_ROUTES, bat_priv, | 138 | bat_dbg(DBG_ROUTES, bat_priv, |
@@ -133,8 +159,7 @@ static void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node, | |||
133 | } | 159 | } |
134 | 160 | ||
135 | void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node, | 161 | void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node, |
136 | struct neigh_node *neigh_node, const unsigned char *tt_buff, | 162 | struct neigh_node *neigh_node) |
137 | int tt_buff_len) | ||
138 | { | 163 | { |
139 | struct neigh_node *router = NULL; | 164 | struct neigh_node *router = NULL; |
140 | 165 | ||
@@ -144,11 +169,7 @@ void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node, | |||
144 | router = orig_node_get_router(orig_node); | 169 | router = orig_node_get_router(orig_node); |
145 | 170 | ||
146 | if (router != neigh_node) | 171 | if (router != neigh_node) |
147 | update_route(bat_priv, orig_node, neigh_node, | 172 | update_route(bat_priv, orig_node, neigh_node); |
148 | tt_buff, tt_buff_len); | ||
149 | /* may be just TT changed */ | ||
150 | else | ||
151 | update_TT(bat_priv, orig_node, tt_buff, tt_buff_len); | ||
152 | 173 | ||
153 | out: | 174 | out: |
154 | if (router) | 175 | if (router) |
@@ -163,7 +184,7 @@ static int is_bidirectional_neigh(struct orig_node *orig_node, | |||
163 | struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); | 184 | struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); |
164 | struct neigh_node *neigh_node = NULL, *tmp_neigh_node; | 185 | struct neigh_node *neigh_node = NULL, *tmp_neigh_node; |
165 | struct hlist_node *node; | 186 | struct hlist_node *node; |
166 | unsigned char total_count; | 187 | uint8_t total_count; |
167 | uint8_t orig_eq_count, neigh_rq_count, tq_own; | 188 | uint8_t orig_eq_count, neigh_rq_count, tq_own; |
168 | int tq_asym_penalty, ret = 0; | 189 | int tq_asym_penalty, ret = 0; |
169 | 190 | ||
@@ -360,14 +381,12 @@ static void update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node, | |||
360 | const struct ethhdr *ethhdr, | 381 | const struct ethhdr *ethhdr, |
361 | const struct batman_packet *batman_packet, | 382 | const struct batman_packet *batman_packet, |
362 | struct hard_iface *if_incoming, | 383 | struct hard_iface *if_incoming, |
363 | const unsigned char *tt_buff, int tt_buff_len, | 384 | const unsigned char *tt_buff, int is_duplicate) |
364 | char is_duplicate) | ||
365 | { | 385 | { |
366 | struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL; | 386 | struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL; |
367 | struct neigh_node *router = NULL; | 387 | struct neigh_node *router = NULL; |
368 | struct orig_node *orig_node_tmp; | 388 | struct orig_node *orig_node_tmp; |
369 | struct hlist_node *node; | 389 | struct hlist_node *node; |
370 | int tmp_tt_buff_len; | ||
371 | uint8_t bcast_own_sum_orig, bcast_own_sum_neigh; | 390 | uint8_t bcast_own_sum_orig, bcast_own_sum_neigh; |
372 | 391 | ||
373 | bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): " | 392 | bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): " |
@@ -432,9 +451,6 @@ static void update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node, | |||
432 | 451 | ||
433 | bonding_candidate_add(orig_node, neigh_node); | 452 | bonding_candidate_add(orig_node, neigh_node); |
434 | 453 | ||
435 | tmp_tt_buff_len = (tt_buff_len > batman_packet->num_tt * ETH_ALEN ? | ||
436 | batman_packet->num_tt * ETH_ALEN : tt_buff_len); | ||
437 | |||
438 | /* if this neighbor already is our next hop there is nothing | 454 | /* if this neighbor already is our next hop there is nothing |
439 | * to change */ | 455 | * to change */ |
440 | router = orig_node_get_router(orig_node); | 456 | router = orig_node_get_router(orig_node); |
@@ -464,15 +480,19 @@ static void update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node, | |||
464 | goto update_tt; | 480 | goto update_tt; |
465 | } | 481 | } |
466 | 482 | ||
467 | update_routes(bat_priv, orig_node, neigh_node, | 483 | update_routes(bat_priv, orig_node, neigh_node); |
468 | tt_buff, tmp_tt_buff_len); | ||
469 | goto update_gw; | ||
470 | 484 | ||
471 | update_tt: | 485 | update_tt: |
472 | update_routes(bat_priv, orig_node, router, | 486 | /* I have to check for transtable changes only if the OGM has been |
473 | tt_buff, tmp_tt_buff_len); | 487 | * sent through a primary interface */ |
488 | if (((batman_packet->orig != ethhdr->h_source) && | ||
489 | (batman_packet->ttl > 2)) || | ||
490 | (batman_packet->flags & PRIMARIES_FIRST_HOP)) | ||
491 | update_transtable(bat_priv, orig_node, tt_buff, | ||
492 | batman_packet->tt_num_changes, | ||
493 | batman_packet->ttvn, | ||
494 | batman_packet->tt_crc); | ||
474 | 495 | ||
475 | update_gw: | ||
476 | if (orig_node->gw_flags != batman_packet->gw_flags) | 496 | if (orig_node->gw_flags != batman_packet->gw_flags) |
477 | gw_node_update(bat_priv, orig_node, batman_packet->gw_flags); | 497 | gw_node_update(bat_priv, orig_node, batman_packet->gw_flags); |
478 | 498 | ||
@@ -528,7 +548,7 @@ static int window_protected(struct bat_priv *bat_priv, | |||
528 | * -1 the packet is old and has been received while the seqno window | 548 | * -1 the packet is old and has been received while the seqno window |
529 | * was protected. Caller should drop it. | 549 | * was protected. Caller should drop it. |
530 | */ | 550 | */ |
531 | static char count_real_packets(const struct ethhdr *ethhdr, | 551 | static int count_real_packets(const struct ethhdr *ethhdr, |
532 | const struct batman_packet *batman_packet, | 552 | const struct batman_packet *batman_packet, |
533 | const struct hard_iface *if_incoming) | 553 | const struct hard_iface *if_incoming) |
534 | { | 554 | { |
@@ -536,7 +556,7 @@ static char count_real_packets(const struct ethhdr *ethhdr, | |||
536 | struct orig_node *orig_node; | 556 | struct orig_node *orig_node; |
537 | struct neigh_node *tmp_neigh_node; | 557 | struct neigh_node *tmp_neigh_node; |
538 | struct hlist_node *node; | 558 | struct hlist_node *node; |
539 | char is_duplicate = 0; | 559 | int is_duplicate = 0; |
540 | int32_t seq_diff; | 560 | int32_t seq_diff; |
541 | int need_update = 0; | 561 | int need_update = 0; |
542 | int set_mark, ret = -1; | 562 | int set_mark, ret = -1; |
@@ -594,7 +614,7 @@ out: | |||
594 | 614 | ||
595 | void receive_bat_packet(const struct ethhdr *ethhdr, | 615 | void receive_bat_packet(const struct ethhdr *ethhdr, |
596 | struct batman_packet *batman_packet, | 616 | struct batman_packet *batman_packet, |
597 | const unsigned char *tt_buff, int tt_buff_len, | 617 | const unsigned char *tt_buff, |
598 | struct hard_iface *if_incoming) | 618 | struct hard_iface *if_incoming) |
599 | { | 619 | { |
600 | struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); | 620 | struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); |
@@ -602,10 +622,10 @@ void receive_bat_packet(const struct ethhdr *ethhdr, | |||
602 | struct orig_node *orig_neigh_node, *orig_node; | 622 | struct orig_node *orig_neigh_node, *orig_node; |
603 | struct neigh_node *router = NULL, *router_router = NULL; | 623 | struct neigh_node *router = NULL, *router_router = NULL; |
604 | struct neigh_node *orig_neigh_router = NULL; | 624 | struct neigh_node *orig_neigh_router = NULL; |
605 | char has_directlink_flag; | 625 | int has_directlink_flag; |
606 | char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0; | 626 | int is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0; |
607 | char is_broadcast = 0, is_bidirectional, is_single_hop_neigh; | 627 | int is_broadcast = 0, is_bidirectional, is_single_hop_neigh; |
608 | char is_duplicate; | 628 | int is_duplicate; |
609 | uint32_t if_incoming_seqno; | 629 | uint32_t if_incoming_seqno; |
610 | 630 | ||
611 | /* Silently drop when the batman packet is actually not a | 631 | /* Silently drop when the batman packet is actually not a |
@@ -633,12 +653,14 @@ void receive_bat_packet(const struct ethhdr *ethhdr, | |||
633 | 653 | ||
634 | bat_dbg(DBG_BATMAN, bat_priv, | 654 | bat_dbg(DBG_BATMAN, bat_priv, |
635 | "Received BATMAN packet via NB: %pM, IF: %s [%pM] " | 655 | "Received BATMAN packet via NB: %pM, IF: %s [%pM] " |
636 | "(from OG: %pM, via prev OG: %pM, seqno %d, tq %d, " | 656 | "(from OG: %pM, via prev OG: %pM, seqno %d, ttvn %u, " |
637 | "TTL %d, V %d, IDF %d)\n", | 657 | "crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n", |
638 | ethhdr->h_source, if_incoming->net_dev->name, | 658 | ethhdr->h_source, if_incoming->net_dev->name, |
639 | if_incoming->net_dev->dev_addr, batman_packet->orig, | 659 | if_incoming->net_dev->dev_addr, batman_packet->orig, |
640 | batman_packet->prev_sender, batman_packet->seqno, | 660 | batman_packet->prev_sender, batman_packet->seqno, |
641 | batman_packet->tq, batman_packet->ttl, batman_packet->version, | 661 | batman_packet->ttvn, batman_packet->tt_crc, |
662 | batman_packet->tt_num_changes, batman_packet->tq, | ||
663 | batman_packet->ttl, batman_packet->version, | ||
642 | has_directlink_flag); | 664 | has_directlink_flag); |
643 | 665 | ||
644 | rcu_read_lock(); | 666 | rcu_read_lock(); |
@@ -790,14 +812,14 @@ void receive_bat_packet(const struct ethhdr *ethhdr, | |||
790 | ((orig_node->last_real_seqno == batman_packet->seqno) && | 812 | ((orig_node->last_real_seqno == batman_packet->seqno) && |
791 | (orig_node->last_ttl - 3 <= batman_packet->ttl)))) | 813 | (orig_node->last_ttl - 3 <= batman_packet->ttl)))) |
792 | update_orig(bat_priv, orig_node, ethhdr, batman_packet, | 814 | update_orig(bat_priv, orig_node, ethhdr, batman_packet, |
793 | if_incoming, tt_buff, tt_buff_len, is_duplicate); | 815 | if_incoming, tt_buff, is_duplicate); |
794 | 816 | ||
795 | /* is single hop (direct) neighbor */ | 817 | /* is single hop (direct) neighbor */ |
796 | if (is_single_hop_neigh) { | 818 | if (is_single_hop_neigh) { |
797 | 819 | ||
798 | /* mark direct link on incoming interface */ | 820 | /* mark direct link on incoming interface */ |
799 | schedule_forward_packet(orig_node, ethhdr, batman_packet, | 821 | schedule_forward_packet(orig_node, ethhdr, batman_packet, |
800 | 1, tt_buff_len, if_incoming); | 822 | 1, if_incoming); |
801 | 823 | ||
802 | bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: " | 824 | bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: " |
803 | "rebroadcast neighbor packet with direct link flag\n"); | 825 | "rebroadcast neighbor packet with direct link flag\n"); |
@@ -820,7 +842,7 @@ void receive_bat_packet(const struct ethhdr *ethhdr, | |||
820 | bat_dbg(DBG_BATMAN, bat_priv, | 842 | bat_dbg(DBG_BATMAN, bat_priv, |
821 | "Forwarding packet: rebroadcast originator packet\n"); | 843 | "Forwarding packet: rebroadcast originator packet\n"); |
822 | schedule_forward_packet(orig_node, ethhdr, batman_packet, | 844 | schedule_forward_packet(orig_node, ethhdr, batman_packet, |
823 | 0, tt_buff_len, if_incoming); | 845 | 0, if_incoming); |
824 | 846 | ||
825 | out_neigh: | 847 | out_neigh: |
826 | if ((orig_neigh_node) && (!is_single_hop_neigh)) | 848 | if ((orig_neigh_node) && (!is_single_hop_neigh)) |
@@ -1167,6 +1189,118 @@ static struct neigh_node *find_ifalter_router(struct orig_node *primary_orig, | |||
1167 | return router; | 1189 | return router; |
1168 | } | 1190 | } |
1169 | 1191 | ||
1192 | int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if) | ||
1193 | { | ||
1194 | struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); | ||
1195 | struct tt_query_packet *tt_query; | ||
1196 | struct ethhdr *ethhdr; | ||
1197 | |||
1198 | /* drop packet if it has not necessary minimum size */ | ||
1199 | if (unlikely(!pskb_may_pull(skb, sizeof(struct tt_query_packet)))) | ||
1200 | goto out; | ||
1201 | |||
1202 | /* I could need to modify it */ | ||
1203 | if (skb_cow(skb, sizeof(struct tt_query_packet)) < 0) | ||
1204 | goto out; | ||
1205 | |||
1206 | ethhdr = (struct ethhdr *)skb_mac_header(skb); | ||
1207 | |||
1208 | /* packet with unicast indication but broadcast recipient */ | ||
1209 | if (is_broadcast_ether_addr(ethhdr->h_dest)) | ||
1210 | goto out; | ||
1211 | |||
1212 | /* packet with broadcast sender address */ | ||
1213 | if (is_broadcast_ether_addr(ethhdr->h_source)) | ||
1214 | goto out; | ||
1215 | |||
1216 | tt_query = (struct tt_query_packet *)skb->data; | ||
1217 | |||
1218 | tt_query->tt_data = ntohs(tt_query->tt_data); | ||
1219 | |||
1220 | switch (tt_query->flags & TT_QUERY_TYPE_MASK) { | ||
1221 | case TT_REQUEST: | ||
1222 | /* If we cannot provide an answer the tt_request is | ||
1223 | * forwarded */ | ||
1224 | if (!send_tt_response(bat_priv, tt_query)) { | ||
1225 | bat_dbg(DBG_TT, bat_priv, | ||
1226 | "Routing TT_REQUEST to %pM [%c]\n", | ||
1227 | tt_query->dst, | ||
1228 | (tt_query->flags & TT_FULL_TABLE ? 'F' : '.')); | ||
1229 | tt_query->tt_data = htons(tt_query->tt_data); | ||
1230 | return route_unicast_packet(skb, recv_if); | ||
1231 | } | ||
1232 | break; | ||
1233 | case TT_RESPONSE: | ||
1234 | /* packet needs to be linearised to access the TT changes */ | ||
1235 | if (skb_linearize(skb) < 0) | ||
1236 | goto out; | ||
1237 | |||
1238 | if (is_my_mac(tt_query->dst)) | ||
1239 | handle_tt_response(bat_priv, tt_query); | ||
1240 | else { | ||
1241 | bat_dbg(DBG_TT, bat_priv, | ||
1242 | "Routing TT_RESPONSE to %pM [%c]\n", | ||
1243 | tt_query->dst, | ||
1244 | (tt_query->flags & TT_FULL_TABLE ? 'F' : '.')); | ||
1245 | tt_query->tt_data = htons(tt_query->tt_data); | ||
1246 | return route_unicast_packet(skb, recv_if); | ||
1247 | } | ||
1248 | break; | ||
1249 | } | ||
1250 | |||
1251 | out: | ||
1252 | /* returning NET_RX_DROP will make the caller function kfree the skb */ | ||
1253 | return NET_RX_DROP; | ||
1254 | } | ||
1255 | |||
1256 | int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if) | ||
1257 | { | ||
1258 | struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); | ||
1259 | struct roam_adv_packet *roam_adv_packet; | ||
1260 | struct orig_node *orig_node; | ||
1261 | struct ethhdr *ethhdr; | ||
1262 | |||
1263 | /* drop packet if it has not necessary minimum size */ | ||
1264 | if (unlikely(!pskb_may_pull(skb, sizeof(struct roam_adv_packet)))) | ||
1265 | goto out; | ||
1266 | |||
1267 | ethhdr = (struct ethhdr *)skb_mac_header(skb); | ||
1268 | |||
1269 | /* packet with unicast indication but broadcast recipient */ | ||
1270 | if (is_broadcast_ether_addr(ethhdr->h_dest)) | ||
1271 | goto out; | ||
1272 | |||
1273 | /* packet with broadcast sender address */ | ||
1274 | if (is_broadcast_ether_addr(ethhdr->h_source)) | ||
1275 | goto out; | ||
1276 | |||
1277 | roam_adv_packet = (struct roam_adv_packet *)skb->data; | ||
1278 | |||
1279 | if (!is_my_mac(roam_adv_packet->dst)) | ||
1280 | return route_unicast_packet(skb, recv_if); | ||
1281 | |||
1282 | orig_node = orig_hash_find(bat_priv, roam_adv_packet->src); | ||
1283 | if (!orig_node) | ||
1284 | goto out; | ||
1285 | |||
1286 | bat_dbg(DBG_TT, bat_priv, "Received ROAMING_ADV from %pM " | ||
1287 | "(client %pM)\n", roam_adv_packet->src, | ||
1288 | roam_adv_packet->client); | ||
1289 | |||
1290 | tt_global_add(bat_priv, orig_node, roam_adv_packet->client, | ||
1291 | atomic_read(&orig_node->last_ttvn) + 1, true); | ||
1292 | |||
1293 | /* Roaming phase starts: I have new information but the ttvn has not | ||
1294 | * been incremented yet. This flag will make me check all the incoming | ||
1295 | * packets for the correct destination. */ | ||
1296 | bat_priv->tt_poss_change = true; | ||
1297 | |||
1298 | orig_node_free_ref(orig_node); | ||
1299 | out: | ||
1300 | /* returning NET_RX_DROP will make the caller function kfree the skb */ | ||
1301 | return NET_RX_DROP; | ||
1302 | } | ||
1303 | |||
1170 | /* find a suitable router for this originator, and use | 1304 | /* find a suitable router for this originator, and use |
1171 | * bonding if possible. increases the found neighbors | 1305 | * bonding if possible. increases the found neighbors |
1172 | * refcount.*/ | 1306 | * refcount.*/ |
@@ -1353,14 +1487,84 @@ out: | |||
1353 | return ret; | 1487 | return ret; |
1354 | } | 1488 | } |
1355 | 1489 | ||
1490 | static int check_unicast_ttvn(struct bat_priv *bat_priv, | ||
1491 | struct sk_buff *skb) { | ||
1492 | uint8_t curr_ttvn; | ||
1493 | struct orig_node *orig_node; | ||
1494 | struct ethhdr *ethhdr; | ||
1495 | struct hard_iface *primary_if; | ||
1496 | struct unicast_packet *unicast_packet; | ||
1497 | bool tt_poss_change; | ||
1498 | |||
1499 | /* I could need to modify it */ | ||
1500 | if (skb_cow(skb, sizeof(struct unicast_packet)) < 0) | ||
1501 | return 0; | ||
1502 | |||
1503 | unicast_packet = (struct unicast_packet *)skb->data; | ||
1504 | |||
1505 | if (is_my_mac(unicast_packet->dest)) { | ||
1506 | tt_poss_change = bat_priv->tt_poss_change; | ||
1507 | curr_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn); | ||
1508 | } else { | ||
1509 | orig_node = orig_hash_find(bat_priv, unicast_packet->dest); | ||
1510 | |||
1511 | if (!orig_node) | ||
1512 | return 0; | ||
1513 | |||
1514 | curr_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn); | ||
1515 | tt_poss_change = orig_node->tt_poss_change; | ||
1516 | orig_node_free_ref(orig_node); | ||
1517 | } | ||
1518 | |||
1519 | /* Check whether I have to reroute the packet */ | ||
1520 | if (seq_before(unicast_packet->ttvn, curr_ttvn) || tt_poss_change) { | ||
1521 | /* Linearize the skb before accessing it */ | ||
1522 | if (skb_linearize(skb) < 0) | ||
1523 | return 0; | ||
1524 | |||
1525 | ethhdr = (struct ethhdr *)(skb->data + | ||
1526 | sizeof(struct unicast_packet)); | ||
1527 | orig_node = transtable_search(bat_priv, ethhdr->h_dest); | ||
1528 | |||
1529 | if (!orig_node) { | ||
1530 | if (!is_my_client(bat_priv, ethhdr->h_dest)) | ||
1531 | return 0; | ||
1532 | primary_if = primary_if_get_selected(bat_priv); | ||
1533 | if (!primary_if) | ||
1534 | return 0; | ||
1535 | memcpy(unicast_packet->dest, | ||
1536 | primary_if->net_dev->dev_addr, ETH_ALEN); | ||
1537 | hardif_free_ref(primary_if); | ||
1538 | } else { | ||
1539 | memcpy(unicast_packet->dest, orig_node->orig, | ||
1540 | ETH_ALEN); | ||
1541 | curr_ttvn = (uint8_t) | ||
1542 | atomic_read(&orig_node->last_ttvn); | ||
1543 | orig_node_free_ref(orig_node); | ||
1544 | } | ||
1545 | |||
1546 | bat_dbg(DBG_ROUTES, bat_priv, "TTVN mismatch (old_ttvn %u " | ||
1547 | "new_ttvn %u)! Rerouting unicast packet (for %pM) to " | ||
1548 | "%pM\n", unicast_packet->ttvn, curr_ttvn, | ||
1549 | ethhdr->h_dest, unicast_packet->dest); | ||
1550 | |||
1551 | unicast_packet->ttvn = curr_ttvn; | ||
1552 | } | ||
1553 | return 1; | ||
1554 | } | ||
1555 | |||
1356 | int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if) | 1556 | int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if) |
1357 | { | 1557 | { |
1558 | struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); | ||
1358 | struct unicast_packet *unicast_packet; | 1559 | struct unicast_packet *unicast_packet; |
1359 | int hdr_size = sizeof(*unicast_packet); | 1560 | int hdr_size = sizeof(*unicast_packet); |
1360 | 1561 | ||
1361 | if (check_unicast_packet(skb, hdr_size) < 0) | 1562 | if (check_unicast_packet(skb, hdr_size) < 0) |
1362 | return NET_RX_DROP; | 1563 | return NET_RX_DROP; |
1363 | 1564 | ||
1565 | if (!check_unicast_ttvn(bat_priv, skb)) | ||
1566 | return NET_RX_DROP; | ||
1567 | |||
1364 | unicast_packet = (struct unicast_packet *)skb->data; | 1568 | unicast_packet = (struct unicast_packet *)skb->data; |
1365 | 1569 | ||
1366 | /* packet for me */ | 1570 | /* packet for me */ |
@@ -1383,6 +1587,9 @@ int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if) | |||
1383 | if (check_unicast_packet(skb, hdr_size) < 0) | 1587 | if (check_unicast_packet(skb, hdr_size) < 0) |
1384 | return NET_RX_DROP; | 1588 | return NET_RX_DROP; |
1385 | 1589 | ||
1590 | if (!check_unicast_ttvn(bat_priv, skb)) | ||
1591 | return NET_RX_DROP; | ||
1592 | |||
1386 | unicast_packet = (struct unicast_frag_packet *)skb->data; | 1593 | unicast_packet = (struct unicast_frag_packet *)skb->data; |
1387 | 1594 | ||
1388 | /* packet for me */ | 1595 | /* packet for me */ |
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h index 0ce03923ec05..fb14e9579b19 100644 --- a/net/batman-adv/routing.h +++ b/net/batman-adv/routing.h | |||
@@ -25,11 +25,10 @@ | |||
25 | void slide_own_bcast_window(struct hard_iface *hard_iface); | 25 | void slide_own_bcast_window(struct hard_iface *hard_iface); |
26 | void receive_bat_packet(const struct ethhdr *ethhdr, | 26 | void receive_bat_packet(const struct ethhdr *ethhdr, |
27 | struct batman_packet *batman_packet, | 27 | struct batman_packet *batman_packet, |
28 | const unsigned char *tt_buff, int tt_buff_len, | 28 | const unsigned char *tt_buff, |
29 | struct hard_iface *if_incoming); | 29 | struct hard_iface *if_incoming); |
30 | void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node, | 30 | void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node, |
31 | struct neigh_node *neigh_node, const unsigned char *tt_buff, | 31 | struct neigh_node *neigh_node); |
32 | int tt_buff_len); | ||
33 | int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if); | 32 | int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if); |
34 | int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if); | 33 | int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if); |
35 | int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if); | 34 | int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if); |
@@ -37,6 +36,8 @@ int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if); | |||
37 | int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if); | 36 | int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if); |
38 | int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if); | 37 | int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if); |
39 | int recv_bat_packet(struct sk_buff *skb, struct hard_iface *recv_if); | 38 | int recv_bat_packet(struct sk_buff *skb, struct hard_iface *recv_if); |
39 | int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if); | ||
40 | int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if); | ||
40 | struct neigh_node *find_router(struct bat_priv *bat_priv, | 41 | struct neigh_node *find_router(struct bat_priv *bat_priv, |
41 | struct orig_node *orig_node, | 42 | struct orig_node *orig_node, |
42 | const struct hard_iface *recv_if); | 43 | const struct hard_iface *recv_if); |
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c index a1b8c3173a3f..7a2f0823f1c2 100644 --- a/net/batman-adv/send.c +++ b/net/batman-adv/send.c | |||
@@ -120,7 +120,7 @@ static void send_packet_to_if(struct forw_packet *forw_packet, | |||
120 | /* adjust all flags and log packets */ | 120 | /* adjust all flags and log packets */ |
121 | while (aggregated_packet(buff_pos, | 121 | while (aggregated_packet(buff_pos, |
122 | forw_packet->packet_len, | 122 | forw_packet->packet_len, |
123 | batman_packet->num_tt)) { | 123 | batman_packet->tt_num_changes)) { |
124 | 124 | ||
125 | /* we might have aggregated direct link packets with an | 125 | /* we might have aggregated direct link packets with an |
126 | * ordinary base packet */ | 126 | * ordinary base packet */ |
@@ -135,17 +135,17 @@ static void send_packet_to_if(struct forw_packet *forw_packet, | |||
135 | "Forwarding")); | 135 | "Forwarding")); |
136 | bat_dbg(DBG_BATMAN, bat_priv, | 136 | bat_dbg(DBG_BATMAN, bat_priv, |
137 | "%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d," | 137 | "%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d," |
138 | " IDF %s) on interface %s [%pM]\n", | 138 | " IDF %s, hvn %d) on interface %s [%pM]\n", |
139 | fwd_str, (packet_num > 0 ? "aggregated " : ""), | 139 | fwd_str, (packet_num > 0 ? "aggregated " : ""), |
140 | batman_packet->orig, ntohl(batman_packet->seqno), | 140 | batman_packet->orig, ntohl(batman_packet->seqno), |
141 | batman_packet->tq, batman_packet->ttl, | 141 | batman_packet->tq, batman_packet->ttl, |
142 | (batman_packet->flags & DIRECTLINK ? | 142 | (batman_packet->flags & DIRECTLINK ? |
143 | "on" : "off"), | 143 | "on" : "off"), |
144 | hard_iface->net_dev->name, | 144 | batman_packet->ttvn, hard_iface->net_dev->name, |
145 | hard_iface->net_dev->dev_addr); | 145 | hard_iface->net_dev->dev_addr); |
146 | 146 | ||
147 | buff_pos += sizeof(*batman_packet) + | 147 | buff_pos += sizeof(*batman_packet) + |
148 | (batman_packet->num_tt * ETH_ALEN); | 148 | tt_len(batman_packet->tt_num_changes); |
149 | packet_num++; | 149 | packet_num++; |
150 | batman_packet = (struct batman_packet *) | 150 | batman_packet = (struct batman_packet *) |
151 | (forw_packet->skb->data + buff_pos); | 151 | (forw_packet->skb->data + buff_pos); |
@@ -165,7 +165,7 @@ static void send_packet(struct forw_packet *forw_packet) | |||
165 | struct bat_priv *bat_priv; | 165 | struct bat_priv *bat_priv; |
166 | struct batman_packet *batman_packet = | 166 | struct batman_packet *batman_packet = |
167 | (struct batman_packet *)(forw_packet->skb->data); | 167 | (struct batman_packet *)(forw_packet->skb->data); |
168 | unsigned char directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0); | 168 | int directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0); |
169 | 169 | ||
170 | if (!forw_packet->if_incoming) { | 170 | if (!forw_packet->if_incoming) { |
171 | pr_err("Error - can't forward packet: incoming iface not " | 171 | pr_err("Error - can't forward packet: incoming iface not " |
@@ -213,25 +213,18 @@ static void send_packet(struct forw_packet *forw_packet) | |||
213 | rcu_read_unlock(); | 213 | rcu_read_unlock(); |
214 | } | 214 | } |
215 | 215 | ||
216 | static void rebuild_batman_packet(struct bat_priv *bat_priv, | 216 | static void realloc_packet_buffer(struct hard_iface *hard_iface, |
217 | struct hard_iface *hard_iface) | 217 | int new_len) |
218 | { | 218 | { |
219 | int new_len; | ||
220 | unsigned char *new_buff; | 219 | unsigned char *new_buff; |
221 | struct batman_packet *batman_packet; | 220 | struct batman_packet *batman_packet; |
222 | 221 | ||
223 | new_len = sizeof(*batman_packet) + (bat_priv->num_local_tt * ETH_ALEN); | ||
224 | new_buff = kmalloc(new_len, GFP_ATOMIC); | 222 | new_buff = kmalloc(new_len, GFP_ATOMIC); |
225 | 223 | ||
226 | /* keep old buffer if kmalloc should fail */ | 224 | /* keep old buffer if kmalloc should fail */ |
227 | if (new_buff) { | 225 | if (new_buff) { |
228 | memcpy(new_buff, hard_iface->packet_buff, | 226 | memcpy(new_buff, hard_iface->packet_buff, |
229 | sizeof(*batman_packet)); | 227 | sizeof(*batman_packet)); |
230 | batman_packet = (struct batman_packet *)new_buff; | ||
231 | |||
232 | batman_packet->num_tt = tt_local_fill_buffer(bat_priv, | ||
233 | new_buff + sizeof(*batman_packet), | ||
234 | new_len - sizeof(*batman_packet)); | ||
235 | 228 | ||
236 | kfree(hard_iface->packet_buff); | 229 | kfree(hard_iface->packet_buff); |
237 | hard_iface->packet_buff = new_buff; | 230 | hard_iface->packet_buff = new_buff; |
@@ -239,6 +232,46 @@ static void rebuild_batman_packet(struct bat_priv *bat_priv, | |||
239 | } | 232 | } |
240 | } | 233 | } |
241 | 234 | ||
235 | /* when calling this function (hard_iface == primary_if) has to be true */ | ||
236 | static void prepare_packet_buffer(struct bat_priv *bat_priv, | ||
237 | struct hard_iface *hard_iface) | ||
238 | { | ||
239 | int new_len; | ||
240 | struct batman_packet *batman_packet; | ||
241 | |||
242 | new_len = BAT_PACKET_LEN + | ||
243 | tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes)); | ||
244 | |||
245 | /* if we have too many changes for one packet don't send any | ||
246 | * and wait for the tt table request which will be fragmented */ | ||
247 | if (new_len > hard_iface->soft_iface->mtu) | ||
248 | new_len = BAT_PACKET_LEN; | ||
249 | |||
250 | realloc_packet_buffer(hard_iface, new_len); | ||
251 | batman_packet = (struct batman_packet *)hard_iface->packet_buff; | ||
252 | |||
253 | atomic_set(&bat_priv->tt_crc, tt_local_crc(bat_priv)); | ||
254 | |||
255 | /* reset the sending counter */ | ||
256 | atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX); | ||
257 | |||
258 | batman_packet->tt_num_changes = tt_changes_fill_buffer(bat_priv, | ||
259 | hard_iface->packet_buff + BAT_PACKET_LEN, | ||
260 | hard_iface->packet_len - BAT_PACKET_LEN); | ||
261 | |||
262 | } | ||
263 | |||
264 | static void reset_packet_buffer(struct bat_priv *bat_priv, | ||
265 | struct hard_iface *hard_iface) | ||
266 | { | ||
267 | struct batman_packet *batman_packet; | ||
268 | |||
269 | realloc_packet_buffer(hard_iface, BAT_PACKET_LEN); | ||
270 | |||
271 | batman_packet = (struct batman_packet *)hard_iface->packet_buff; | ||
272 | batman_packet->tt_num_changes = 0; | ||
273 | } | ||
274 | |||
242 | void schedule_own_packet(struct hard_iface *hard_iface) | 275 | void schedule_own_packet(struct hard_iface *hard_iface) |
243 | { | 276 | { |
244 | struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); | 277 | struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); |
@@ -264,14 +297,23 @@ void schedule_own_packet(struct hard_iface *hard_iface) | |||
264 | if (hard_iface->if_status == IF_TO_BE_ACTIVATED) | 297 | if (hard_iface->if_status == IF_TO_BE_ACTIVATED) |
265 | hard_iface->if_status = IF_ACTIVE; | 298 | hard_iface->if_status = IF_ACTIVE; |
266 | 299 | ||
267 | /* if local tt has changed and interface is a primary interface */ | 300 | if (hard_iface == primary_if) { |
268 | if ((atomic_read(&bat_priv->tt_local_changed)) && | 301 | /* if at least one change happened */ |
269 | (hard_iface == primary_if)) | 302 | if (atomic_read(&bat_priv->tt_local_changes) > 0) { |
270 | rebuild_batman_packet(bat_priv, hard_iface); | 303 | prepare_packet_buffer(bat_priv, hard_iface); |
304 | /* Increment the TTVN only once per OGM interval */ | ||
305 | atomic_inc(&bat_priv->ttvn); | ||
306 | bat_priv->tt_poss_change = false; | ||
307 | } | ||
308 | |||
309 | /* if the changes have been sent enough times */ | ||
310 | if (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt)) | ||
311 | reset_packet_buffer(bat_priv, hard_iface); | ||
312 | } | ||
271 | 313 | ||
272 | /** | 314 | /** |
273 | * NOTE: packet_buff might just have been re-allocated in | 315 | * NOTE: packet_buff might just have been re-allocated in |
274 | * rebuild_batman_packet() | 316 | * prepare_packet_buffer() or in reset_packet_buffer() |
275 | */ | 317 | */ |
276 | batman_packet = (struct batman_packet *)hard_iface->packet_buff; | 318 | batman_packet = (struct batman_packet *)hard_iface->packet_buff; |
277 | 319 | ||
@@ -279,6 +321,9 @@ void schedule_own_packet(struct hard_iface *hard_iface) | |||
279 | batman_packet->seqno = | 321 | batman_packet->seqno = |
280 | htonl((uint32_t)atomic_read(&hard_iface->seqno)); | 322 | htonl((uint32_t)atomic_read(&hard_iface->seqno)); |
281 | 323 | ||
324 | batman_packet->ttvn = atomic_read(&bat_priv->ttvn); | ||
325 | batman_packet->tt_crc = htons((uint16_t)atomic_read(&bat_priv->tt_crc)); | ||
326 | |||
282 | if (vis_server == VIS_TYPE_SERVER_SYNC) | 327 | if (vis_server == VIS_TYPE_SERVER_SYNC) |
283 | batman_packet->flags |= VIS_SERVER; | 328 | batman_packet->flags |= VIS_SERVER; |
284 | else | 329 | else |
@@ -307,13 +352,14 @@ void schedule_own_packet(struct hard_iface *hard_iface) | |||
307 | void schedule_forward_packet(struct orig_node *orig_node, | 352 | void schedule_forward_packet(struct orig_node *orig_node, |
308 | const struct ethhdr *ethhdr, | 353 | const struct ethhdr *ethhdr, |
309 | struct batman_packet *batman_packet, | 354 | struct batman_packet *batman_packet, |
310 | uint8_t directlink, int tt_buff_len, | 355 | int directlink, |
311 | struct hard_iface *if_incoming) | 356 | struct hard_iface *if_incoming) |
312 | { | 357 | { |
313 | struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); | 358 | struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); |
314 | struct neigh_node *router; | 359 | struct neigh_node *router; |
315 | unsigned char in_tq, in_ttl, tq_avg = 0; | 360 | uint8_t in_tq, in_ttl, tq_avg = 0; |
316 | unsigned long send_time; | 361 | unsigned long send_time; |
362 | uint8_t tt_num_changes; | ||
317 | 363 | ||
318 | if (batman_packet->ttl <= 1) { | 364 | if (batman_packet->ttl <= 1) { |
319 | bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n"); | 365 | bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n"); |
@@ -324,6 +370,7 @@ void schedule_forward_packet(struct orig_node *orig_node, | |||
324 | 370 | ||
325 | in_tq = batman_packet->tq; | 371 | in_tq = batman_packet->tq; |
326 | in_ttl = batman_packet->ttl; | 372 | in_ttl = batman_packet->ttl; |
373 | tt_num_changes = batman_packet->tt_num_changes; | ||
327 | 374 | ||
328 | batman_packet->ttl--; | 375 | batman_packet->ttl--; |
329 | memcpy(batman_packet->prev_sender, ethhdr->h_source, ETH_ALEN); | 376 | memcpy(batman_packet->prev_sender, ethhdr->h_source, ETH_ALEN); |
@@ -356,6 +403,7 @@ void schedule_forward_packet(struct orig_node *orig_node, | |||
356 | batman_packet->ttl); | 403 | batman_packet->ttl); |
357 | 404 | ||
358 | batman_packet->seqno = htonl(batman_packet->seqno); | 405 | batman_packet->seqno = htonl(batman_packet->seqno); |
406 | batman_packet->tt_crc = htons(batman_packet->tt_crc); | ||
359 | 407 | ||
360 | /* switch of primaries first hop flag when forwarding */ | 408 | /* switch of primaries first hop flag when forwarding */ |
361 | batman_packet->flags &= ~PRIMARIES_FIRST_HOP; | 409 | batman_packet->flags &= ~PRIMARIES_FIRST_HOP; |
@@ -367,7 +415,7 @@ void schedule_forward_packet(struct orig_node *orig_node, | |||
367 | send_time = forward_send_time(); | 415 | send_time = forward_send_time(); |
368 | add_bat_packet_to_list(bat_priv, | 416 | add_bat_packet_to_list(bat_priv, |
369 | (unsigned char *)batman_packet, | 417 | (unsigned char *)batman_packet, |
370 | sizeof(*batman_packet) + tt_buff_len, | 418 | sizeof(*batman_packet) + tt_len(tt_num_changes), |
371 | if_incoming, 0, send_time); | 419 | if_incoming, 0, send_time); |
372 | } | 420 | } |
373 | 421 | ||
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h index eceab870024d..633224ab028a 100644 --- a/net/batman-adv/send.h +++ b/net/batman-adv/send.h | |||
@@ -28,7 +28,7 @@ void schedule_own_packet(struct hard_iface *hard_iface); | |||
28 | void schedule_forward_packet(struct orig_node *orig_node, | 28 | void schedule_forward_packet(struct orig_node *orig_node, |
29 | const struct ethhdr *ethhdr, | 29 | const struct ethhdr *ethhdr, |
30 | struct batman_packet *batman_packet, | 30 | struct batman_packet *batman_packet, |
31 | uint8_t directlink, int tt_buff_len, | 31 | int directlink, |
32 | struct hard_iface *if_outgoing); | 32 | struct hard_iface *if_outgoing); |
33 | int add_bcast_packet_to_list(struct bat_priv *bat_priv, | 33 | int add_bcast_packet_to_list(struct bat_priv *bat_priv, |
34 | const struct sk_buff *skb); | 34 | const struct sk_buff *skb); |
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index b8d3f248efdc..2dcdbb7a236c 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include "gateway_common.h" | 30 | #include "gateway_common.h" |
31 | #include "gateway_client.h" | 31 | #include "gateway_client.h" |
32 | #include "bat_sysfs.h" | 32 | #include "bat_sysfs.h" |
33 | #include "originator.h" | ||
33 | #include <linux/slab.h> | 34 | #include <linux/slab.h> |
34 | #include <linux/ethtool.h> | 35 | #include <linux/ethtool.h> |
35 | #include <linux/etherdevice.h> | 36 | #include <linux/etherdevice.h> |
@@ -380,7 +381,7 @@ void softif_neigh_purge(struct bat_priv *bat_priv) | |||
380 | struct softif_neigh *softif_neigh, *curr_softif_neigh; | 381 | struct softif_neigh *softif_neigh, *curr_softif_neigh; |
381 | struct softif_neigh_vid *softif_neigh_vid; | 382 | struct softif_neigh_vid *softif_neigh_vid; |
382 | struct hlist_node *node, *node_tmp, *node_tmp2; | 383 | struct hlist_node *node, *node_tmp, *node_tmp2; |
383 | char do_deselect; | 384 | int do_deselect; |
384 | 385 | ||
385 | rcu_read_lock(); | 386 | rcu_read_lock(); |
386 | hlist_for_each_entry_rcu(softif_neigh_vid, node, | 387 | hlist_for_each_entry_rcu(softif_neigh_vid, node, |
@@ -534,7 +535,7 @@ static int interface_set_mac_addr(struct net_device *dev, void *p) | |||
534 | /* only modify transtable if it has been initialised before */ | 535 | /* only modify transtable if it has been initialised before */ |
535 | if (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE) { | 536 | if (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE) { |
536 | tt_local_remove(bat_priv, dev->dev_addr, | 537 | tt_local_remove(bat_priv, dev->dev_addr, |
537 | "mac address changed"); | 538 | "mac address changed", false); |
538 | tt_local_add(dev, addr->sa_data); | 539 | tt_local_add(dev, addr->sa_data); |
539 | } | 540 | } |
540 | 541 | ||
@@ -553,7 +554,7 @@ static int interface_change_mtu(struct net_device *dev, int new_mtu) | |||
553 | return 0; | 554 | return 0; |
554 | } | 555 | } |
555 | 556 | ||
556 | int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) | 557 | static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) |
557 | { | 558 | { |
558 | struct ethhdr *ethhdr = (struct ethhdr *)skb->data; | 559 | struct ethhdr *ethhdr = (struct ethhdr *)skb->data; |
559 | struct bat_priv *bat_priv = netdev_priv(soft_iface); | 560 | struct bat_priv *bat_priv = netdev_priv(soft_iface); |
@@ -561,6 +562,7 @@ int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) | |||
561 | struct bcast_packet *bcast_packet; | 562 | struct bcast_packet *bcast_packet; |
562 | struct vlan_ethhdr *vhdr; | 563 | struct vlan_ethhdr *vhdr; |
563 | struct softif_neigh *curr_softif_neigh = NULL; | 564 | struct softif_neigh *curr_softif_neigh = NULL; |
565 | struct orig_node *orig_node = NULL; | ||
564 | int data_len = skb->len, ret; | 566 | int data_len = skb->len, ret; |
565 | short vid = -1; | 567 | short vid = -1; |
566 | bool do_bcast = false; | 568 | bool do_bcast = false; |
@@ -592,11 +594,13 @@ int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) | |||
592 | if (curr_softif_neigh) | 594 | if (curr_softif_neigh) |
593 | goto dropped; | 595 | goto dropped; |
594 | 596 | ||
595 | /* TODO: check this for locks */ | 597 | /* Register the client MAC in the transtable */ |
596 | tt_local_add(soft_iface, ethhdr->h_source); | 598 | tt_local_add(soft_iface, ethhdr->h_source); |
597 | 599 | ||
598 | if (is_multicast_ether_addr(ethhdr->h_dest)) { | 600 | orig_node = transtable_search(bat_priv, ethhdr->h_dest); |
599 | ret = gw_is_target(bat_priv, skb); | 601 | if (is_multicast_ether_addr(ethhdr->h_dest) || |
602 | (orig_node && orig_node->gw_flags)) { | ||
603 | ret = gw_is_target(bat_priv, skb, orig_node); | ||
600 | 604 | ||
601 | if (ret < 0) | 605 | if (ret < 0) |
602 | goto dropped; | 606 | goto dropped; |
@@ -656,6 +660,8 @@ end: | |||
656 | softif_neigh_free_ref(curr_softif_neigh); | 660 | softif_neigh_free_ref(curr_softif_neigh); |
657 | if (primary_if) | 661 | if (primary_if) |
658 | hardif_free_ref(primary_if); | 662 | hardif_free_ref(primary_if); |
663 | if (orig_node) | ||
664 | orig_node_free_ref(orig_node); | ||
659 | return NETDEV_TX_OK; | 665 | return NETDEV_TX_OK; |
660 | } | 666 | } |
661 | 667 | ||
@@ -830,7 +836,13 @@ struct net_device *softif_create(const char *name) | |||
830 | 836 | ||
831 | atomic_set(&bat_priv->mesh_state, MESH_INACTIVE); | 837 | atomic_set(&bat_priv->mesh_state, MESH_INACTIVE); |
832 | atomic_set(&bat_priv->bcast_seqno, 1); | 838 | atomic_set(&bat_priv->bcast_seqno, 1); |
833 | atomic_set(&bat_priv->tt_local_changed, 0); | 839 | atomic_set(&bat_priv->ttvn, 0); |
840 | atomic_set(&bat_priv->tt_local_changes, 0); | ||
841 | atomic_set(&bat_priv->tt_ogm_append_cnt, 0); | ||
842 | |||
843 | bat_priv->tt_buff = NULL; | ||
844 | bat_priv->tt_buff_len = 0; | ||
845 | bat_priv->tt_poss_change = false; | ||
834 | 846 | ||
835 | bat_priv->primary_if = NULL; | 847 | bat_priv->primary_if = NULL; |
836 | bat_priv->num_ifaces = 0; | 848 | bat_priv->num_ifaces = 0; |
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h index c24906dd1d6a..001546fc96f1 100644 --- a/net/batman-adv/soft-interface.h +++ b/net/batman-adv/soft-interface.h | |||
@@ -25,7 +25,6 @@ | |||
25 | int my_skb_head_push(struct sk_buff *skb, unsigned int len); | 25 | int my_skb_head_push(struct sk_buff *skb, unsigned int len); |
26 | int softif_neigh_seq_print_text(struct seq_file *seq, void *offset); | 26 | int softif_neigh_seq_print_text(struct seq_file *seq, void *offset); |
27 | void softif_neigh_purge(struct bat_priv *bat_priv); | 27 | void softif_neigh_purge(struct bat_priv *bat_priv); |
28 | int interface_tx(struct sk_buff *skb, struct net_device *soft_iface); | ||
29 | void interface_rx(struct net_device *soft_iface, | 28 | void interface_rx(struct net_device *soft_iface, |
30 | struct sk_buff *skb, struct hard_iface *recv_if, | 29 | struct sk_buff *skb, struct hard_iface *recv_if, |
31 | int hdr_size); | 30 | int hdr_size); |
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 561f76968d5e..5f1fcd573633 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c | |||
@@ -23,13 +23,17 @@ | |||
23 | #include "translation-table.h" | 23 | #include "translation-table.h" |
24 | #include "soft-interface.h" | 24 | #include "soft-interface.h" |
25 | #include "hard-interface.h" | 25 | #include "hard-interface.h" |
26 | #include "send.h" | ||
26 | #include "hash.h" | 27 | #include "hash.h" |
27 | #include "originator.h" | 28 | #include "originator.h" |
29 | #include "routing.h" | ||
28 | 30 | ||
29 | static void tt_local_purge(struct work_struct *work); | 31 | #include <linux/crc16.h> |
30 | static void _tt_global_del_orig(struct bat_priv *bat_priv, | 32 | |
31 | struct tt_global_entry *tt_global_entry, | 33 | static void _tt_global_del(struct bat_priv *bat_priv, |
32 | const char *message); | 34 | struct tt_global_entry *tt_global_entry, |
35 | const char *message); | ||
36 | static void tt_purge(struct work_struct *work); | ||
33 | 37 | ||
34 | /* returns 1 if they are the same mac addr */ | 38 | /* returns 1 if they are the same mac addr */ |
35 | static int compare_ltt(const struct hlist_node *node, const void *data2) | 39 | static int compare_ltt(const struct hlist_node *node, const void *data2) |
@@ -49,10 +53,11 @@ static int compare_gtt(const struct hlist_node *node, const void *data2) | |||
49 | return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); | 53 | return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); |
50 | } | 54 | } |
51 | 55 | ||
52 | static void tt_local_start_timer(struct bat_priv *bat_priv) | 56 | static void tt_start_timer(struct bat_priv *bat_priv) |
53 | { | 57 | { |
54 | INIT_DELAYED_WORK(&bat_priv->tt_work, tt_local_purge); | 58 | INIT_DELAYED_WORK(&bat_priv->tt_work, tt_purge); |
55 | queue_delayed_work(bat_event_workqueue, &bat_priv->tt_work, 10 * HZ); | 59 | queue_delayed_work(bat_event_workqueue, &bat_priv->tt_work, |
60 | msecs_to_jiffies(5000)); | ||
56 | } | 61 | } |
57 | 62 | ||
58 | static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv, | 63 | static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv, |
@@ -75,6 +80,9 @@ static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv, | |||
75 | if (!compare_eth(tt_local_entry, data)) | 80 | if (!compare_eth(tt_local_entry, data)) |
76 | continue; | 81 | continue; |
77 | 82 | ||
83 | if (!atomic_inc_not_zero(&tt_local_entry->refcount)) | ||
84 | continue; | ||
85 | |||
78 | tt_local_entry_tmp = tt_local_entry; | 86 | tt_local_entry_tmp = tt_local_entry; |
79 | break; | 87 | break; |
80 | } | 88 | } |
@@ -104,6 +112,9 @@ static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv, | |||
104 | if (!compare_eth(tt_global_entry, data)) | 112 | if (!compare_eth(tt_global_entry, data)) |
105 | continue; | 113 | continue; |
106 | 114 | ||
115 | if (!atomic_inc_not_zero(&tt_global_entry->refcount)) | ||
116 | continue; | ||
117 | |||
107 | tt_global_entry_tmp = tt_global_entry; | 118 | tt_global_entry_tmp = tt_global_entry; |
108 | break; | 119 | break; |
109 | } | 120 | } |
@@ -112,7 +123,57 @@ static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv, | |||
112 | return tt_global_entry_tmp; | 123 | return tt_global_entry_tmp; |
113 | } | 124 | } |
114 | 125 | ||
115 | int tt_local_init(struct bat_priv *bat_priv) | 126 | static bool is_out_of_time(unsigned long starting_time, unsigned long timeout) |
127 | { | ||
128 | unsigned long deadline; | ||
129 | deadline = starting_time + msecs_to_jiffies(timeout); | ||
130 | |||
131 | return time_after(jiffies, deadline); | ||
132 | } | ||
133 | |||
134 | static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry) | ||
135 | { | ||
136 | if (atomic_dec_and_test(&tt_local_entry->refcount)) | ||
137 | kfree_rcu(tt_local_entry, rcu); | ||
138 | } | ||
139 | |||
140 | static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry) | ||
141 | { | ||
142 | if (atomic_dec_and_test(&tt_global_entry->refcount)) | ||
143 | kfree_rcu(tt_global_entry, rcu); | ||
144 | } | ||
145 | |||
146 | static void tt_local_event(struct bat_priv *bat_priv, uint8_t op, | ||
147 | const uint8_t *addr, bool roaming) | ||
148 | { | ||
149 | struct tt_change_node *tt_change_node; | ||
150 | |||
151 | tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC); | ||
152 | |||
153 | if (!tt_change_node) | ||
154 | return; | ||
155 | |||
156 | tt_change_node->change.flags = op; | ||
157 | if (roaming) | ||
158 | tt_change_node->change.flags |= TT_CLIENT_ROAM; | ||
159 | |||
160 | memcpy(tt_change_node->change.addr, addr, ETH_ALEN); | ||
161 | |||
162 | spin_lock_bh(&bat_priv->tt_changes_list_lock); | ||
163 | /* track the change in the OGMinterval list */ | ||
164 | list_add_tail(&tt_change_node->list, &bat_priv->tt_changes_list); | ||
165 | atomic_inc(&bat_priv->tt_local_changes); | ||
166 | spin_unlock_bh(&bat_priv->tt_changes_list_lock); | ||
167 | |||
168 | atomic_set(&bat_priv->tt_ogm_append_cnt, 0); | ||
169 | } | ||
170 | |||
171 | int tt_len(int changes_num) | ||
172 | { | ||
173 | return changes_num * sizeof(struct tt_change); | ||
174 | } | ||
175 | |||
176 | static int tt_local_init(struct bat_priv *bat_priv) | ||
116 | { | 177 | { |
117 | if (bat_priv->tt_local_hash) | 178 | if (bat_priv->tt_local_hash) |
118 | return 1; | 179 | return 1; |
@@ -122,54 +183,35 @@ int tt_local_init(struct bat_priv *bat_priv) | |||
122 | if (!bat_priv->tt_local_hash) | 183 | if (!bat_priv->tt_local_hash) |
123 | return 0; | 184 | return 0; |
124 | 185 | ||
125 | atomic_set(&bat_priv->tt_local_changed, 0); | ||
126 | tt_local_start_timer(bat_priv); | ||
127 | |||
128 | return 1; | 186 | return 1; |
129 | } | 187 | } |
130 | 188 | ||
131 | void tt_local_add(struct net_device *soft_iface, const uint8_t *addr) | 189 | void tt_local_add(struct net_device *soft_iface, const uint8_t *addr) |
132 | { | 190 | { |
133 | struct bat_priv *bat_priv = netdev_priv(soft_iface); | 191 | struct bat_priv *bat_priv = netdev_priv(soft_iface); |
134 | struct tt_local_entry *tt_local_entry; | 192 | struct tt_local_entry *tt_local_entry = NULL; |
135 | struct tt_global_entry *tt_global_entry; | 193 | struct tt_global_entry *tt_global_entry = NULL; |
136 | int required_bytes; | ||
137 | 194 | ||
138 | spin_lock_bh(&bat_priv->tt_lhash_lock); | ||
139 | tt_local_entry = tt_local_hash_find(bat_priv, addr); | 195 | tt_local_entry = tt_local_hash_find(bat_priv, addr); |
140 | spin_unlock_bh(&bat_priv->tt_lhash_lock); | ||
141 | 196 | ||
142 | if (tt_local_entry) { | 197 | if (tt_local_entry) { |
143 | tt_local_entry->last_seen = jiffies; | 198 | tt_local_entry->last_seen = jiffies; |
144 | return; | 199 | goto out; |
145 | } | ||
146 | |||
147 | /* only announce as many hosts as possible in the batman-packet and | ||
148 | space in batman_packet->num_tt That also should give a limit to | ||
149 | MAC-flooding. */ | ||
150 | required_bytes = (bat_priv->num_local_tt + 1) * ETH_ALEN; | ||
151 | required_bytes += BAT_PACKET_LEN; | ||
152 | |||
153 | if ((required_bytes > ETH_DATA_LEN) || | ||
154 | (atomic_read(&bat_priv->aggregated_ogms) && | ||
155 | required_bytes > MAX_AGGREGATION_BYTES) || | ||
156 | (bat_priv->num_local_tt + 1 > 255)) { | ||
157 | bat_dbg(DBG_ROUTES, bat_priv, | ||
158 | "Can't add new local tt entry (%pM): " | ||
159 | "number of local tt entries exceeds packet size\n", | ||
160 | addr); | ||
161 | return; | ||
162 | } | 200 | } |
163 | 201 | ||
164 | bat_dbg(DBG_ROUTES, bat_priv, | ||
165 | "Creating new local tt entry: %pM\n", addr); | ||
166 | |||
167 | tt_local_entry = kmalloc(sizeof(*tt_local_entry), GFP_ATOMIC); | 202 | tt_local_entry = kmalloc(sizeof(*tt_local_entry), GFP_ATOMIC); |
168 | if (!tt_local_entry) | 203 | if (!tt_local_entry) |
169 | return; | 204 | goto out; |
205 | |||
206 | tt_local_event(bat_priv, NO_FLAGS, addr, false); | ||
207 | |||
208 | bat_dbg(DBG_TT, bat_priv, | ||
209 | "Creating new local tt entry: %pM (ttvn: %d)\n", addr, | ||
210 | (uint8_t)atomic_read(&bat_priv->ttvn)); | ||
170 | 211 | ||
171 | memcpy(tt_local_entry->addr, addr, ETH_ALEN); | 212 | memcpy(tt_local_entry->addr, addr, ETH_ALEN); |
172 | tt_local_entry->last_seen = jiffies; | 213 | tt_local_entry->last_seen = jiffies; |
214 | atomic_set(&tt_local_entry->refcount, 2); | ||
173 | 215 | ||
174 | /* the batman interface mac address should never be purged */ | 216 | /* the batman interface mac address should never be purged */ |
175 | if (compare_eth(addr, soft_iface->dev_addr)) | 217 | if (compare_eth(addr, soft_iface->dev_addr)) |
@@ -177,61 +219,75 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr) | |||
177 | else | 219 | else |
178 | tt_local_entry->never_purge = 0; | 220 | tt_local_entry->never_purge = 0; |
179 | 221 | ||
180 | spin_lock_bh(&bat_priv->tt_lhash_lock); | ||
181 | |||
182 | hash_add(bat_priv->tt_local_hash, compare_ltt, choose_orig, | 222 | hash_add(bat_priv->tt_local_hash, compare_ltt, choose_orig, |
183 | tt_local_entry, &tt_local_entry->hash_entry); | 223 | tt_local_entry, &tt_local_entry->hash_entry); |
184 | bat_priv->num_local_tt++; | ||
185 | atomic_set(&bat_priv->tt_local_changed, 1); | ||
186 | 224 | ||
187 | spin_unlock_bh(&bat_priv->tt_lhash_lock); | 225 | atomic_inc(&bat_priv->num_local_tt); |
188 | 226 | ||
189 | /* remove address from global hash if present */ | 227 | /* remove address from global hash if present */ |
190 | spin_lock_bh(&bat_priv->tt_ghash_lock); | ||
191 | |||
192 | tt_global_entry = tt_global_hash_find(bat_priv, addr); | 228 | tt_global_entry = tt_global_hash_find(bat_priv, addr); |
193 | 229 | ||
230 | /* Check whether it is a roaming! */ | ||
231 | if (tt_global_entry) { | ||
232 | /* This node is probably going to update its tt table */ | ||
233 | tt_global_entry->orig_node->tt_poss_change = true; | ||
234 | _tt_global_del(bat_priv, tt_global_entry, | ||
235 | "local tt received"); | ||
236 | send_roam_adv(bat_priv, tt_global_entry->addr, | ||
237 | tt_global_entry->orig_node); | ||
238 | } | ||
239 | out: | ||
240 | if (tt_local_entry) | ||
241 | tt_local_entry_free_ref(tt_local_entry); | ||
194 | if (tt_global_entry) | 242 | if (tt_global_entry) |
195 | _tt_global_del_orig(bat_priv, tt_global_entry, | 243 | tt_global_entry_free_ref(tt_global_entry); |
196 | "local tt received"); | ||
197 | |||
198 | spin_unlock_bh(&bat_priv->tt_ghash_lock); | ||
199 | } | 244 | } |
200 | 245 | ||
201 | int tt_local_fill_buffer(struct bat_priv *bat_priv, | 246 | int tt_changes_fill_buffer(struct bat_priv *bat_priv, |
202 | unsigned char *buff, int buff_len) | 247 | unsigned char *buff, int buff_len) |
203 | { | 248 | { |
204 | struct hashtable_t *hash = bat_priv->tt_local_hash; | 249 | int count = 0, tot_changes = 0; |
205 | struct tt_local_entry *tt_local_entry; | 250 | struct tt_change_node *entry, *safe; |
206 | struct hlist_node *node; | ||
207 | struct hlist_head *head; | ||
208 | int i, count = 0; | ||
209 | 251 | ||
210 | spin_lock_bh(&bat_priv->tt_lhash_lock); | 252 | if (buff_len > 0) |
253 | tot_changes = buff_len / tt_len(1); | ||
211 | 254 | ||
212 | for (i = 0; i < hash->size; i++) { | 255 | spin_lock_bh(&bat_priv->tt_changes_list_lock); |
213 | head = &hash->table[i]; | 256 | atomic_set(&bat_priv->tt_local_changes, 0); |
214 | |||
215 | rcu_read_lock(); | ||
216 | hlist_for_each_entry_rcu(tt_local_entry, node, | ||
217 | head, hash_entry) { | ||
218 | if (buff_len < (count + 1) * ETH_ALEN) | ||
219 | break; | ||
220 | |||
221 | memcpy(buff + (count * ETH_ALEN), tt_local_entry->addr, | ||
222 | ETH_ALEN); | ||
223 | 257 | ||
258 | list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list, | ||
259 | list) { | ||
260 | if (count < tot_changes) { | ||
261 | memcpy(buff + tt_len(count), | ||
262 | &entry->change, sizeof(struct tt_change)); | ||
224 | count++; | 263 | count++; |
225 | } | 264 | } |
226 | rcu_read_unlock(); | 265 | list_del(&entry->list); |
266 | kfree(entry); | ||
227 | } | 267 | } |
268 | spin_unlock_bh(&bat_priv->tt_changes_list_lock); | ||
269 | |||
270 | /* Keep the buffer for possible tt_request */ | ||
271 | spin_lock_bh(&bat_priv->tt_buff_lock); | ||
272 | kfree(bat_priv->tt_buff); | ||
273 | bat_priv->tt_buff_len = 0; | ||
274 | bat_priv->tt_buff = NULL; | ||
275 | /* We check whether this new OGM has no changes due to size | ||
276 | * problems */ | ||
277 | if (buff_len > 0) { | ||
278 | /** | ||
279 | * if kmalloc() fails we will reply with the full table | ||
280 | * instead of providing the diff | ||
281 | */ | ||
282 | bat_priv->tt_buff = kmalloc(buff_len, GFP_ATOMIC); | ||
283 | if (bat_priv->tt_buff) { | ||
284 | memcpy(bat_priv->tt_buff, buff, buff_len); | ||
285 | bat_priv->tt_buff_len = buff_len; | ||
286 | } | ||
287 | } | ||
288 | spin_unlock_bh(&bat_priv->tt_buff_lock); | ||
228 | 289 | ||
229 | /* if we did not get all new local tts see you next time ;-) */ | 290 | return tot_changes; |
230 | if (count == bat_priv->num_local_tt) | ||
231 | atomic_set(&bat_priv->tt_local_changed, 0); | ||
232 | |||
233 | spin_unlock_bh(&bat_priv->tt_lhash_lock); | ||
234 | return count; | ||
235 | } | 291 | } |
236 | 292 | ||
237 | int tt_local_seq_print_text(struct seq_file *seq, void *offset) | 293 | int tt_local_seq_print_text(struct seq_file *seq, void *offset) |
@@ -263,10 +319,8 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset) | |||
263 | } | 319 | } |
264 | 320 | ||
265 | seq_printf(seq, "Locally retrieved addresses (from %s) " | 321 | seq_printf(seq, "Locally retrieved addresses (from %s) " |
266 | "announced via TT:\n", | 322 | "announced via TT (TTVN: %u):\n", |
267 | net_dev->name); | 323 | net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn)); |
268 | |||
269 | spin_lock_bh(&bat_priv->tt_lhash_lock); | ||
270 | 324 | ||
271 | buf_size = 1; | 325 | buf_size = 1; |
272 | /* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */ | 326 | /* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */ |
@@ -281,7 +335,6 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset) | |||
281 | 335 | ||
282 | buff = kmalloc(buf_size, GFP_ATOMIC); | 336 | buff = kmalloc(buf_size, GFP_ATOMIC); |
283 | if (!buff) { | 337 | if (!buff) { |
284 | spin_unlock_bh(&bat_priv->tt_lhash_lock); | ||
285 | ret = -ENOMEM; | 338 | ret = -ENOMEM; |
286 | goto out; | 339 | goto out; |
287 | } | 340 | } |
@@ -301,8 +354,6 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset) | |||
301 | rcu_read_unlock(); | 354 | rcu_read_unlock(); |
302 | } | 355 | } |
303 | 356 | ||
304 | spin_unlock_bh(&bat_priv->tt_lhash_lock); | ||
305 | |||
306 | seq_printf(seq, "%s", buff); | 357 | seq_printf(seq, "%s", buff); |
307 | kfree(buff); | 358 | kfree(buff); |
308 | out: | 359 | out: |
@@ -311,92 +362,108 @@ out: | |||
311 | return ret; | 362 | return ret; |
312 | } | 363 | } |
313 | 364 | ||
314 | static void _tt_local_del(struct hlist_node *node, void *arg) | ||
315 | { | ||
316 | struct bat_priv *bat_priv = arg; | ||
317 | void *data = container_of(node, struct tt_local_entry, hash_entry); | ||
318 | |||
319 | kfree(data); | ||
320 | bat_priv->num_local_tt--; | ||
321 | atomic_set(&bat_priv->tt_local_changed, 1); | ||
322 | } | ||
323 | |||
324 | static void tt_local_del(struct bat_priv *bat_priv, | 365 | static void tt_local_del(struct bat_priv *bat_priv, |
325 | struct tt_local_entry *tt_local_entry, | 366 | struct tt_local_entry *tt_local_entry, |
326 | const char *message) | 367 | const char *message) |
327 | { | 368 | { |
328 | bat_dbg(DBG_ROUTES, bat_priv, "Deleting local tt entry (%pM): %s\n", | 369 | bat_dbg(DBG_TT, bat_priv, "Deleting local tt entry (%pM): %s\n", |
329 | tt_local_entry->addr, message); | 370 | tt_local_entry->addr, message); |
330 | 371 | ||
372 | atomic_dec(&bat_priv->num_local_tt); | ||
373 | |||
331 | hash_remove(bat_priv->tt_local_hash, compare_ltt, choose_orig, | 374 | hash_remove(bat_priv->tt_local_hash, compare_ltt, choose_orig, |
332 | tt_local_entry->addr); | 375 | tt_local_entry->addr); |
333 | _tt_local_del(&tt_local_entry->hash_entry, bat_priv); | 376 | |
377 | tt_local_entry_free_ref(tt_local_entry); | ||
334 | } | 378 | } |
335 | 379 | ||
336 | void tt_local_remove(struct bat_priv *bat_priv, | 380 | void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr, |
337 | const uint8_t *addr, const char *message) | 381 | const char *message, bool roaming) |
338 | { | 382 | { |
339 | struct tt_local_entry *tt_local_entry; | 383 | struct tt_local_entry *tt_local_entry = NULL; |
340 | |||
341 | spin_lock_bh(&bat_priv->tt_lhash_lock); | ||
342 | 384 | ||
343 | tt_local_entry = tt_local_hash_find(bat_priv, addr); | 385 | tt_local_entry = tt_local_hash_find(bat_priv, addr); |
344 | 386 | ||
345 | if (tt_local_entry) | 387 | if (!tt_local_entry) |
346 | tt_local_del(bat_priv, tt_local_entry, message); | 388 | goto out; |
347 | 389 | ||
348 | spin_unlock_bh(&bat_priv->tt_lhash_lock); | 390 | tt_local_event(bat_priv, TT_CHANGE_DEL, tt_local_entry->addr, roaming); |
391 | tt_local_del(bat_priv, tt_local_entry, message); | ||
392 | out: | ||
393 | if (tt_local_entry) | ||
394 | tt_local_entry_free_ref(tt_local_entry); | ||
349 | } | 395 | } |
350 | 396 | ||
351 | static void tt_local_purge(struct work_struct *work) | 397 | static void tt_local_purge(struct bat_priv *bat_priv) |
352 | { | 398 | { |
353 | struct delayed_work *delayed_work = | ||
354 | container_of(work, struct delayed_work, work); | ||
355 | struct bat_priv *bat_priv = | ||
356 | container_of(delayed_work, struct bat_priv, tt_work); | ||
357 | struct hashtable_t *hash = bat_priv->tt_local_hash; | 399 | struct hashtable_t *hash = bat_priv->tt_local_hash; |
358 | struct tt_local_entry *tt_local_entry; | 400 | struct tt_local_entry *tt_local_entry; |
359 | struct hlist_node *node, *node_tmp; | 401 | struct hlist_node *node, *node_tmp; |
360 | struct hlist_head *head; | 402 | struct hlist_head *head; |
361 | unsigned long timeout; | 403 | spinlock_t *list_lock; /* protects write access to the hash lists */ |
362 | int i; | 404 | int i; |
363 | 405 | ||
364 | spin_lock_bh(&bat_priv->tt_lhash_lock); | ||
365 | |||
366 | for (i = 0; i < hash->size; i++) { | 406 | for (i = 0; i < hash->size; i++) { |
367 | head = &hash->table[i]; | 407 | head = &hash->table[i]; |
408 | list_lock = &hash->list_locks[i]; | ||
368 | 409 | ||
410 | spin_lock_bh(list_lock); | ||
369 | hlist_for_each_entry_safe(tt_local_entry, node, node_tmp, | 411 | hlist_for_each_entry_safe(tt_local_entry, node, node_tmp, |
370 | head, hash_entry) { | 412 | head, hash_entry) { |
371 | if (tt_local_entry->never_purge) | 413 | if (tt_local_entry->never_purge) |
372 | continue; | 414 | continue; |
373 | 415 | ||
374 | timeout = tt_local_entry->last_seen; | 416 | if (!is_out_of_time(tt_local_entry->last_seen, |
375 | timeout += TT_LOCAL_TIMEOUT * HZ; | 417 | TT_LOCAL_TIMEOUT * 1000)) |
376 | |||
377 | if (time_before(jiffies, timeout)) | ||
378 | continue; | 418 | continue; |
379 | 419 | ||
380 | tt_local_del(bat_priv, tt_local_entry, | 420 | tt_local_event(bat_priv, TT_CHANGE_DEL, |
381 | "address timed out"); | 421 | tt_local_entry->addr, false); |
422 | atomic_dec(&bat_priv->num_local_tt); | ||
423 | bat_dbg(DBG_TT, bat_priv, "Deleting local " | ||
424 | "tt entry (%pM): timed out\n", | ||
425 | tt_local_entry->addr); | ||
426 | hlist_del_rcu(node); | ||
427 | tt_local_entry_free_ref(tt_local_entry); | ||
382 | } | 428 | } |
429 | spin_unlock_bh(list_lock); | ||
383 | } | 430 | } |
384 | 431 | ||
385 | spin_unlock_bh(&bat_priv->tt_lhash_lock); | ||
386 | tt_local_start_timer(bat_priv); | ||
387 | } | 432 | } |
388 | 433 | ||
389 | void tt_local_free(struct bat_priv *bat_priv) | 434 | static void tt_local_table_free(struct bat_priv *bat_priv) |
390 | { | 435 | { |
436 | struct hashtable_t *hash; | ||
437 | spinlock_t *list_lock; /* protects write access to the hash lists */ | ||
438 | struct tt_local_entry *tt_local_entry; | ||
439 | struct hlist_node *node, *node_tmp; | ||
440 | struct hlist_head *head; | ||
441 | int i; | ||
442 | |||
391 | if (!bat_priv->tt_local_hash) | 443 | if (!bat_priv->tt_local_hash) |
392 | return; | 444 | return; |
393 | 445 | ||
394 | cancel_delayed_work_sync(&bat_priv->tt_work); | 446 | hash = bat_priv->tt_local_hash; |
395 | hash_delete(bat_priv->tt_local_hash, _tt_local_del, bat_priv); | 447 | |
448 | for (i = 0; i < hash->size; i++) { | ||
449 | head = &hash->table[i]; | ||
450 | list_lock = &hash->list_locks[i]; | ||
451 | |||
452 | spin_lock_bh(list_lock); | ||
453 | hlist_for_each_entry_safe(tt_local_entry, node, node_tmp, | ||
454 | head, hash_entry) { | ||
455 | hlist_del_rcu(node); | ||
456 | tt_local_entry_free_ref(tt_local_entry); | ||
457 | } | ||
458 | spin_unlock_bh(list_lock); | ||
459 | } | ||
460 | |||
461 | hash_destroy(hash); | ||
462 | |||
396 | bat_priv->tt_local_hash = NULL; | 463 | bat_priv->tt_local_hash = NULL; |
397 | } | 464 | } |
398 | 465 | ||
399 | int tt_global_init(struct bat_priv *bat_priv) | 466 | static int tt_global_init(struct bat_priv *bat_priv) |
400 | { | 467 | { |
401 | if (bat_priv->tt_global_hash) | 468 | if (bat_priv->tt_global_hash) |
402 | return 1; | 469 | return 1; |
@@ -409,73 +476,78 @@ int tt_global_init(struct bat_priv *bat_priv) | |||
409 | return 1; | 476 | return 1; |
410 | } | 477 | } |
411 | 478 | ||
412 | void tt_global_add_orig(struct bat_priv *bat_priv, | 479 | static void tt_changes_list_free(struct bat_priv *bat_priv) |
413 | struct orig_node *orig_node, | ||
414 | const unsigned char *tt_buff, int tt_buff_len) | ||
415 | { | 480 | { |
416 | struct tt_global_entry *tt_global_entry; | 481 | struct tt_change_node *entry, *safe; |
417 | struct tt_local_entry *tt_local_entry; | ||
418 | int tt_buff_count = 0; | ||
419 | const unsigned char *tt_ptr; | ||
420 | |||
421 | while ((tt_buff_count + 1) * ETH_ALEN <= tt_buff_len) { | ||
422 | spin_lock_bh(&bat_priv->tt_ghash_lock); | ||
423 | |||
424 | tt_ptr = tt_buff + (tt_buff_count * ETH_ALEN); | ||
425 | tt_global_entry = tt_global_hash_find(bat_priv, tt_ptr); | ||
426 | |||
427 | if (!tt_global_entry) { | ||
428 | spin_unlock_bh(&bat_priv->tt_ghash_lock); | ||
429 | 482 | ||
430 | tt_global_entry = kmalloc(sizeof(*tt_global_entry), | 483 | spin_lock_bh(&bat_priv->tt_changes_list_lock); |
431 | GFP_ATOMIC); | ||
432 | 484 | ||
433 | if (!tt_global_entry) | 485 | list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list, |
434 | break; | 486 | list) { |
487 | list_del(&entry->list); | ||
488 | kfree(entry); | ||
489 | } | ||
435 | 490 | ||
436 | memcpy(tt_global_entry->addr, tt_ptr, ETH_ALEN); | 491 | atomic_set(&bat_priv->tt_local_changes, 0); |
492 | spin_unlock_bh(&bat_priv->tt_changes_list_lock); | ||
493 | } | ||
437 | 494 | ||
438 | bat_dbg(DBG_ROUTES, bat_priv, | 495 | /* caller must hold orig_node refcount */ |
439 | "Creating new global tt entry: " | 496 | int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node, |
440 | "%pM (via %pM)\n", | 497 | const unsigned char *tt_addr, uint8_t ttvn, bool roaming) |
441 | tt_global_entry->addr, orig_node->orig); | 498 | { |
499 | struct tt_global_entry *tt_global_entry; | ||
500 | struct orig_node *orig_node_tmp; | ||
501 | int ret = 0; | ||
442 | 502 | ||
443 | spin_lock_bh(&bat_priv->tt_ghash_lock); | 503 | tt_global_entry = tt_global_hash_find(bat_priv, tt_addr); |
444 | hash_add(bat_priv->tt_global_hash, compare_gtt, | ||
445 | choose_orig, tt_global_entry, | ||
446 | &tt_global_entry->hash_entry); | ||
447 | 504 | ||
448 | } | 505 | if (!tt_global_entry) { |
506 | tt_global_entry = | ||
507 | kmalloc(sizeof(*tt_global_entry), | ||
508 | GFP_ATOMIC); | ||
509 | if (!tt_global_entry) | ||
510 | goto out; | ||
449 | 511 | ||
512 | memcpy(tt_global_entry->addr, tt_addr, ETH_ALEN); | ||
513 | /* Assign the new orig_node */ | ||
514 | atomic_inc(&orig_node->refcount); | ||
450 | tt_global_entry->orig_node = orig_node; | 515 | tt_global_entry->orig_node = orig_node; |
451 | spin_unlock_bh(&bat_priv->tt_ghash_lock); | 516 | tt_global_entry->ttvn = ttvn; |
452 | 517 | tt_global_entry->flags = NO_FLAGS; | |
453 | /* remove address from local hash if present */ | 518 | tt_global_entry->roam_at = 0; |
454 | spin_lock_bh(&bat_priv->tt_lhash_lock); | 519 | atomic_set(&tt_global_entry->refcount, 2); |
455 | 520 | ||
456 | tt_ptr = tt_buff + (tt_buff_count * ETH_ALEN); | 521 | hash_add(bat_priv->tt_global_hash, compare_gtt, |
457 | tt_local_entry = tt_local_hash_find(bat_priv, tt_ptr); | 522 | choose_orig, tt_global_entry, |
458 | 523 | &tt_global_entry->hash_entry); | |
459 | if (tt_local_entry) | 524 | atomic_inc(&orig_node->tt_size); |
460 | tt_local_del(bat_priv, tt_local_entry, | 525 | } else { |
461 | "global tt received"); | 526 | if (tt_global_entry->orig_node != orig_node) { |
462 | 527 | atomic_dec(&tt_global_entry->orig_node->tt_size); | |
463 | spin_unlock_bh(&bat_priv->tt_lhash_lock); | 528 | orig_node_tmp = tt_global_entry->orig_node; |
464 | 529 | atomic_inc(&orig_node->refcount); | |
465 | tt_buff_count++; | 530 | tt_global_entry->orig_node = orig_node; |
531 | orig_node_free_ref(orig_node_tmp); | ||
532 | atomic_inc(&orig_node->tt_size); | ||
533 | } | ||
534 | tt_global_entry->ttvn = ttvn; | ||
535 | tt_global_entry->flags = NO_FLAGS; | ||
536 | tt_global_entry->roam_at = 0; | ||
466 | } | 537 | } |
467 | 538 | ||
468 | /* initialize, and overwrite if malloc succeeds */ | 539 | bat_dbg(DBG_TT, bat_priv, |
469 | orig_node->tt_buff = NULL; | 540 | "Creating new global tt entry: %pM (via %pM)\n", |
470 | orig_node->tt_buff_len = 0; | 541 | tt_global_entry->addr, orig_node->orig); |
471 | 542 | ||
472 | if (tt_buff_len > 0) { | 543 | /* remove address from local hash if present */ |
473 | orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC); | 544 | tt_local_remove(bat_priv, tt_global_entry->addr, |
474 | if (orig_node->tt_buff) { | 545 | "global tt received", roaming); |
475 | memcpy(orig_node->tt_buff, tt_buff, tt_buff_len); | 546 | ret = 1; |
476 | orig_node->tt_buff_len = tt_buff_len; | 547 | out: |
477 | } | 548 | if (tt_global_entry) |
478 | } | 549 | tt_global_entry_free_ref(tt_global_entry); |
550 | return ret; | ||
479 | } | 551 | } |
480 | 552 | ||
481 | int tt_global_seq_print_text(struct seq_file *seq, void *offset) | 553 | int tt_global_seq_print_text(struct seq_file *seq, void *offset) |
@@ -509,26 +581,27 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset) | |||
509 | seq_printf(seq, | 581 | seq_printf(seq, |
510 | "Globally announced TT entries received via the mesh %s\n", | 582 | "Globally announced TT entries received via the mesh %s\n", |
511 | net_dev->name); | 583 | net_dev->name); |
512 | 584 | seq_printf(seq, " %-13s %s %-15s %s\n", | |
513 | spin_lock_bh(&bat_priv->tt_ghash_lock); | 585 | "Client", "(TTVN)", "Originator", "(Curr TTVN)"); |
514 | 586 | ||
515 | buf_size = 1; | 587 | buf_size = 1; |
516 | /* Estimate length for: " * xx:xx:xx:xx:xx:xx via xx:xx:xx:xx:xx:xx\n"*/ | 588 | /* Estimate length for: " * xx:xx:xx:xx:xx:xx (ttvn) via |
589 | * xx:xx:xx:xx:xx:xx (cur_ttvn)\n"*/ | ||
517 | for (i = 0; i < hash->size; i++) { | 590 | for (i = 0; i < hash->size; i++) { |
518 | head = &hash->table[i]; | 591 | head = &hash->table[i]; |
519 | 592 | ||
520 | rcu_read_lock(); | 593 | rcu_read_lock(); |
521 | __hlist_for_each_rcu(node, head) | 594 | __hlist_for_each_rcu(node, head) |
522 | buf_size += 43; | 595 | buf_size += 59; |
523 | rcu_read_unlock(); | 596 | rcu_read_unlock(); |
524 | } | 597 | } |
525 | 598 | ||
526 | buff = kmalloc(buf_size, GFP_ATOMIC); | 599 | buff = kmalloc(buf_size, GFP_ATOMIC); |
527 | if (!buff) { | 600 | if (!buff) { |
528 | spin_unlock_bh(&bat_priv->tt_ghash_lock); | ||
529 | ret = -ENOMEM; | 601 | ret = -ENOMEM; |
530 | goto out; | 602 | goto out; |
531 | } | 603 | } |
604 | |||
532 | buff[0] = '\0'; | 605 | buff[0] = '\0'; |
533 | pos = 0; | 606 | pos = 0; |
534 | 607 | ||
@@ -538,16 +611,18 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset) | |||
538 | rcu_read_lock(); | 611 | rcu_read_lock(); |
539 | hlist_for_each_entry_rcu(tt_global_entry, node, | 612 | hlist_for_each_entry_rcu(tt_global_entry, node, |
540 | head, hash_entry) { | 613 | head, hash_entry) { |
541 | pos += snprintf(buff + pos, 44, | 614 | pos += snprintf(buff + pos, 61, |
542 | " * %pM via %pM\n", | 615 | " * %pM (%3u) via %pM (%3u)\n", |
543 | tt_global_entry->addr, | 616 | tt_global_entry->addr, |
544 | tt_global_entry->orig_node->orig); | 617 | tt_global_entry->ttvn, |
618 | tt_global_entry->orig_node->orig, | ||
619 | (uint8_t) atomic_read( | ||
620 | &tt_global_entry->orig_node-> | ||
621 | last_ttvn)); | ||
545 | } | 622 | } |
546 | rcu_read_unlock(); | 623 | rcu_read_unlock(); |
547 | } | 624 | } |
548 | 625 | ||
549 | spin_unlock_bh(&bat_priv->tt_ghash_lock); | ||
550 | |||
551 | seq_printf(seq, "%s", buff); | 626 | seq_printf(seq, "%s", buff); |
552 | kfree(buff); | 627 | kfree(buff); |
553 | out: | 628 | out: |
@@ -556,64 +631,145 @@ out: | |||
556 | return ret; | 631 | return ret; |
557 | } | 632 | } |
558 | 633 | ||
559 | static void _tt_global_del_orig(struct bat_priv *bat_priv, | 634 | static void _tt_global_del(struct bat_priv *bat_priv, |
560 | struct tt_global_entry *tt_global_entry, | 635 | struct tt_global_entry *tt_global_entry, |
561 | const char *message) | 636 | const char *message) |
562 | { | 637 | { |
563 | bat_dbg(DBG_ROUTES, bat_priv, | 638 | if (!tt_global_entry) |
639 | goto out; | ||
640 | |||
641 | bat_dbg(DBG_TT, bat_priv, | ||
564 | "Deleting global tt entry %pM (via %pM): %s\n", | 642 | "Deleting global tt entry %pM (via %pM): %s\n", |
565 | tt_global_entry->addr, tt_global_entry->orig_node->orig, | 643 | tt_global_entry->addr, tt_global_entry->orig_node->orig, |
566 | message); | 644 | message); |
567 | 645 | ||
646 | atomic_dec(&tt_global_entry->orig_node->tt_size); | ||
647 | |||
568 | hash_remove(bat_priv->tt_global_hash, compare_gtt, choose_orig, | 648 | hash_remove(bat_priv->tt_global_hash, compare_gtt, choose_orig, |
569 | tt_global_entry->addr); | 649 | tt_global_entry->addr); |
570 | kfree(tt_global_entry); | 650 | out: |
651 | if (tt_global_entry) | ||
652 | tt_global_entry_free_ref(tt_global_entry); | ||
571 | } | 653 | } |
572 | 654 | ||
573 | void tt_global_del_orig(struct bat_priv *bat_priv, | 655 | void tt_global_del(struct bat_priv *bat_priv, |
574 | struct orig_node *orig_node, const char *message) | 656 | struct orig_node *orig_node, const unsigned char *addr, |
657 | const char *message, bool roaming) | ||
575 | { | 658 | { |
576 | struct tt_global_entry *tt_global_entry; | 659 | struct tt_global_entry *tt_global_entry = NULL; |
577 | int tt_buff_count = 0; | ||
578 | unsigned char *tt_ptr; | ||
579 | 660 | ||
580 | if (orig_node->tt_buff_len == 0) | 661 | tt_global_entry = tt_global_hash_find(bat_priv, addr); |
581 | return; | 662 | if (!tt_global_entry) |
663 | goto out; | ||
582 | 664 | ||
583 | spin_lock_bh(&bat_priv->tt_ghash_lock); | 665 | if (tt_global_entry->orig_node == orig_node) { |
666 | if (roaming) { | ||
667 | tt_global_entry->flags |= TT_CLIENT_ROAM; | ||
668 | tt_global_entry->roam_at = jiffies; | ||
669 | goto out; | ||
670 | } | ||
671 | _tt_global_del(bat_priv, tt_global_entry, message); | ||
672 | } | ||
673 | out: | ||
674 | if (tt_global_entry) | ||
675 | tt_global_entry_free_ref(tt_global_entry); | ||
676 | } | ||
584 | 677 | ||
585 | while ((tt_buff_count + 1) * ETH_ALEN <= orig_node->tt_buff_len) { | 678 | void tt_global_del_orig(struct bat_priv *bat_priv, |
586 | tt_ptr = orig_node->tt_buff + (tt_buff_count * ETH_ALEN); | 679 | struct orig_node *orig_node, const char *message) |
587 | tt_global_entry = tt_global_hash_find(bat_priv, tt_ptr); | 680 | { |
681 | struct tt_global_entry *tt_global_entry; | ||
682 | int i; | ||
683 | struct hashtable_t *hash = bat_priv->tt_global_hash; | ||
684 | struct hlist_node *node, *safe; | ||
685 | struct hlist_head *head; | ||
686 | spinlock_t *list_lock; /* protects write access to the hash lists */ | ||
588 | 687 | ||
589 | if ((tt_global_entry) && | 688 | for (i = 0; i < hash->size; i++) { |
590 | (tt_global_entry->orig_node == orig_node)) | 689 | head = &hash->table[i]; |
591 | _tt_global_del_orig(bat_priv, tt_global_entry, | 690 | list_lock = &hash->list_locks[i]; |
592 | message); | ||
593 | 691 | ||
594 | tt_buff_count++; | 692 | spin_lock_bh(list_lock); |
693 | hlist_for_each_entry_safe(tt_global_entry, node, safe, | ||
694 | head, hash_entry) { | ||
695 | if (tt_global_entry->orig_node == orig_node) { | ||
696 | bat_dbg(DBG_TT, bat_priv, | ||
697 | "Deleting global tt entry %pM " | ||
698 | "(via %pM): originator time out\n", | ||
699 | tt_global_entry->addr, | ||
700 | tt_global_entry->orig_node->orig); | ||
701 | hlist_del_rcu(node); | ||
702 | tt_global_entry_free_ref(tt_global_entry); | ||
703 | } | ||
704 | } | ||
705 | spin_unlock_bh(list_lock); | ||
595 | } | 706 | } |
596 | 707 | atomic_set(&orig_node->tt_size, 0); | |
597 | spin_unlock_bh(&bat_priv->tt_ghash_lock); | ||
598 | |||
599 | orig_node->tt_buff_len = 0; | ||
600 | kfree(orig_node->tt_buff); | ||
601 | orig_node->tt_buff = NULL; | ||
602 | } | 708 | } |
603 | 709 | ||
604 | static void tt_global_del(struct hlist_node *node, void *arg) | 710 | static void tt_global_roam_purge(struct bat_priv *bat_priv) |
605 | { | 711 | { |
606 | void *data = container_of(node, struct tt_global_entry, hash_entry); | 712 | struct hashtable_t *hash = bat_priv->tt_global_hash; |
713 | struct tt_global_entry *tt_global_entry; | ||
714 | struct hlist_node *node, *node_tmp; | ||
715 | struct hlist_head *head; | ||
716 | spinlock_t *list_lock; /* protects write access to the hash lists */ | ||
717 | int i; | ||
718 | |||
719 | for (i = 0; i < hash->size; i++) { | ||
720 | head = &hash->table[i]; | ||
721 | list_lock = &hash->list_locks[i]; | ||
722 | |||
723 | spin_lock_bh(list_lock); | ||
724 | hlist_for_each_entry_safe(tt_global_entry, node, node_tmp, | ||
725 | head, hash_entry) { | ||
726 | if (!(tt_global_entry->flags & TT_CLIENT_ROAM)) | ||
727 | continue; | ||
728 | if (!is_out_of_time(tt_global_entry->roam_at, | ||
729 | TT_CLIENT_ROAM_TIMEOUT * 1000)) | ||
730 | continue; | ||
731 | |||
732 | bat_dbg(DBG_TT, bat_priv, "Deleting global " | ||
733 | "tt entry (%pM): Roaming timeout\n", | ||
734 | tt_global_entry->addr); | ||
735 | atomic_dec(&tt_global_entry->orig_node->tt_size); | ||
736 | hlist_del_rcu(node); | ||
737 | tt_global_entry_free_ref(tt_global_entry); | ||
738 | } | ||
739 | spin_unlock_bh(list_lock); | ||
740 | } | ||
607 | 741 | ||
608 | kfree(data); | ||
609 | } | 742 | } |
610 | 743 | ||
611 | void tt_global_free(struct bat_priv *bat_priv) | 744 | static void tt_global_table_free(struct bat_priv *bat_priv) |
612 | { | 745 | { |
746 | struct hashtable_t *hash; | ||
747 | spinlock_t *list_lock; /* protects write access to the hash lists */ | ||
748 | struct tt_global_entry *tt_global_entry; | ||
749 | struct hlist_node *node, *node_tmp; | ||
750 | struct hlist_head *head; | ||
751 | int i; | ||
752 | |||
613 | if (!bat_priv->tt_global_hash) | 753 | if (!bat_priv->tt_global_hash) |
614 | return; | 754 | return; |
615 | 755 | ||
616 | hash_delete(bat_priv->tt_global_hash, tt_global_del, NULL); | 756 | hash = bat_priv->tt_global_hash; |
757 | |||
758 | for (i = 0; i < hash->size; i++) { | ||
759 | head = &hash->table[i]; | ||
760 | list_lock = &hash->list_locks[i]; | ||
761 | |||
762 | spin_lock_bh(list_lock); | ||
763 | hlist_for_each_entry_safe(tt_global_entry, node, node_tmp, | ||
764 | head, hash_entry) { | ||
765 | hlist_del_rcu(node); | ||
766 | tt_global_entry_free_ref(tt_global_entry); | ||
767 | } | ||
768 | spin_unlock_bh(list_lock); | ||
769 | } | ||
770 | |||
771 | hash_destroy(hash); | ||
772 | |||
617 | bat_priv->tt_global_hash = NULL; | 773 | bat_priv->tt_global_hash = NULL; |
618 | } | 774 | } |
619 | 775 | ||
@@ -623,18 +779,846 @@ struct orig_node *transtable_search(struct bat_priv *bat_priv, | |||
623 | struct tt_global_entry *tt_global_entry; | 779 | struct tt_global_entry *tt_global_entry; |
624 | struct orig_node *orig_node = NULL; | 780 | struct orig_node *orig_node = NULL; |
625 | 781 | ||
626 | spin_lock_bh(&bat_priv->tt_ghash_lock); | ||
627 | tt_global_entry = tt_global_hash_find(bat_priv, addr); | 782 | tt_global_entry = tt_global_hash_find(bat_priv, addr); |
628 | 783 | ||
629 | if (!tt_global_entry) | 784 | if (!tt_global_entry) |
630 | goto out; | 785 | goto out; |
631 | 786 | ||
632 | if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount)) | 787 | if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount)) |
633 | goto out; | 788 | goto free_tt; |
634 | 789 | ||
635 | orig_node = tt_global_entry->orig_node; | 790 | orig_node = tt_global_entry->orig_node; |
636 | 791 | ||
792 | free_tt: | ||
793 | tt_global_entry_free_ref(tt_global_entry); | ||
637 | out: | 794 | out: |
638 | spin_unlock_bh(&bat_priv->tt_ghash_lock); | ||
639 | return orig_node; | 795 | return orig_node; |
640 | } | 796 | } |
797 | |||
798 | /* Calculates the checksum of the local table of a given orig_node */ | ||
799 | uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node) | ||
800 | { | ||
801 | uint16_t total = 0, total_one; | ||
802 | struct hashtable_t *hash = bat_priv->tt_global_hash; | ||
803 | struct tt_global_entry *tt_global_entry; | ||
804 | struct hlist_node *node; | ||
805 | struct hlist_head *head; | ||
806 | int i, j; | ||
807 | |||
808 | for (i = 0; i < hash->size; i++) { | ||
809 | head = &hash->table[i]; | ||
810 | |||
811 | rcu_read_lock(); | ||
812 | hlist_for_each_entry_rcu(tt_global_entry, node, | ||
813 | head, hash_entry) { | ||
814 | if (compare_eth(tt_global_entry->orig_node, | ||
815 | orig_node)) { | ||
816 | /* Roaming clients are in the global table for | ||
817 | * consistency only. They don't have to be | ||
818 | * taken into account while computing the | ||
819 | * global crc */ | ||
820 | if (tt_global_entry->flags & TT_CLIENT_ROAM) | ||
821 | continue; | ||
822 | total_one = 0; | ||
823 | for (j = 0; j < ETH_ALEN; j++) | ||
824 | total_one = crc16_byte(total_one, | ||
825 | tt_global_entry->addr[j]); | ||
826 | total ^= total_one; | ||
827 | } | ||
828 | } | ||
829 | rcu_read_unlock(); | ||
830 | } | ||
831 | |||
832 | return total; | ||
833 | } | ||
834 | |||
835 | /* Calculates the checksum of the local table */ | ||
836 | uint16_t tt_local_crc(struct bat_priv *bat_priv) | ||
837 | { | ||
838 | uint16_t total = 0, total_one; | ||
839 | struct hashtable_t *hash = bat_priv->tt_local_hash; | ||
840 | struct tt_local_entry *tt_local_entry; | ||
841 | struct hlist_node *node; | ||
842 | struct hlist_head *head; | ||
843 | int i, j; | ||
844 | |||
845 | for (i = 0; i < hash->size; i++) { | ||
846 | head = &hash->table[i]; | ||
847 | |||
848 | rcu_read_lock(); | ||
849 | hlist_for_each_entry_rcu(tt_local_entry, node, | ||
850 | head, hash_entry) { | ||
851 | total_one = 0; | ||
852 | for (j = 0; j < ETH_ALEN; j++) | ||
853 | total_one = crc16_byte(total_one, | ||
854 | tt_local_entry->addr[j]); | ||
855 | total ^= total_one; | ||
856 | } | ||
857 | rcu_read_unlock(); | ||
858 | } | ||
859 | |||
860 | return total; | ||
861 | } | ||
862 | |||
863 | static void tt_req_list_free(struct bat_priv *bat_priv) | ||
864 | { | ||
865 | struct tt_req_node *node, *safe; | ||
866 | |||
867 | spin_lock_bh(&bat_priv->tt_req_list_lock); | ||
868 | |||
869 | list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) { | ||
870 | list_del(&node->list); | ||
871 | kfree(node); | ||
872 | } | ||
873 | |||
874 | spin_unlock_bh(&bat_priv->tt_req_list_lock); | ||
875 | } | ||
876 | |||
877 | void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node, | ||
878 | const unsigned char *tt_buff, uint8_t tt_num_changes) | ||
879 | { | ||
880 | uint16_t tt_buff_len = tt_len(tt_num_changes); | ||
881 | |||
882 | /* Replace the old buffer only if I received something in the | ||
883 | * last OGM (the OGM could carry no changes) */ | ||
884 | spin_lock_bh(&orig_node->tt_buff_lock); | ||
885 | if (tt_buff_len > 0) { | ||
886 | kfree(orig_node->tt_buff); | ||
887 | orig_node->tt_buff_len = 0; | ||
888 | orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC); | ||
889 | if (orig_node->tt_buff) { | ||
890 | memcpy(orig_node->tt_buff, tt_buff, tt_buff_len); | ||
891 | orig_node->tt_buff_len = tt_buff_len; | ||
892 | } | ||
893 | } | ||
894 | spin_unlock_bh(&orig_node->tt_buff_lock); | ||
895 | } | ||
896 | |||
897 | static void tt_req_purge(struct bat_priv *bat_priv) | ||
898 | { | ||
899 | struct tt_req_node *node, *safe; | ||
900 | |||
901 | spin_lock_bh(&bat_priv->tt_req_list_lock); | ||
902 | list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) { | ||
903 | if (is_out_of_time(node->issued_at, | ||
904 | TT_REQUEST_TIMEOUT * 1000)) { | ||
905 | list_del(&node->list); | ||
906 | kfree(node); | ||
907 | } | ||
908 | } | ||
909 | spin_unlock_bh(&bat_priv->tt_req_list_lock); | ||
910 | } | ||
911 | |||
912 | /* returns the pointer to the new tt_req_node struct if no request | ||
913 | * has already been issued for this orig_node, NULL otherwise */ | ||
914 | static struct tt_req_node *new_tt_req_node(struct bat_priv *bat_priv, | ||
915 | struct orig_node *orig_node) | ||
916 | { | ||
917 | struct tt_req_node *tt_req_node_tmp, *tt_req_node = NULL; | ||
918 | |||
919 | spin_lock_bh(&bat_priv->tt_req_list_lock); | ||
920 | list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) { | ||
921 | if (compare_eth(tt_req_node_tmp, orig_node) && | ||
922 | !is_out_of_time(tt_req_node_tmp->issued_at, | ||
923 | TT_REQUEST_TIMEOUT * 1000)) | ||
924 | goto unlock; | ||
925 | } | ||
926 | |||
927 | tt_req_node = kmalloc(sizeof(*tt_req_node), GFP_ATOMIC); | ||
928 | if (!tt_req_node) | ||
929 | goto unlock; | ||
930 | |||
931 | memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN); | ||
932 | tt_req_node->issued_at = jiffies; | ||
933 | |||
934 | list_add(&tt_req_node->list, &bat_priv->tt_req_list); | ||
935 | unlock: | ||
936 | spin_unlock_bh(&bat_priv->tt_req_list_lock); | ||
937 | return tt_req_node; | ||
938 | } | ||
939 | |||
940 | static int tt_global_valid_entry(const void *entry_ptr, const void *data_ptr) | ||
941 | { | ||
942 | const struct tt_global_entry *tt_global_entry = entry_ptr; | ||
943 | const struct orig_node *orig_node = data_ptr; | ||
944 | |||
945 | if (tt_global_entry->flags & TT_CLIENT_ROAM) | ||
946 | return 0; | ||
947 | |||
948 | return (tt_global_entry->orig_node == orig_node); | ||
949 | } | ||
950 | |||
951 | static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn, | ||
952 | struct hashtable_t *hash, | ||
953 | struct hard_iface *primary_if, | ||
954 | int (*valid_cb)(const void *, | ||
955 | const void *), | ||
956 | void *cb_data) | ||
957 | { | ||
958 | struct tt_local_entry *tt_local_entry; | ||
959 | struct tt_query_packet *tt_response; | ||
960 | struct tt_change *tt_change; | ||
961 | struct hlist_node *node; | ||
962 | struct hlist_head *head; | ||
963 | struct sk_buff *skb = NULL; | ||
964 | uint16_t tt_tot, tt_count; | ||
965 | ssize_t tt_query_size = sizeof(struct tt_query_packet); | ||
966 | int i; | ||
967 | |||
968 | if (tt_query_size + tt_len > primary_if->soft_iface->mtu) { | ||
969 | tt_len = primary_if->soft_iface->mtu - tt_query_size; | ||
970 | tt_len -= tt_len % sizeof(struct tt_change); | ||
971 | } | ||
972 | tt_tot = tt_len / sizeof(struct tt_change); | ||
973 | |||
974 | skb = dev_alloc_skb(tt_query_size + tt_len + ETH_HLEN); | ||
975 | if (!skb) | ||
976 | goto out; | ||
977 | |||
978 | skb_reserve(skb, ETH_HLEN); | ||
979 | tt_response = (struct tt_query_packet *)skb_put(skb, | ||
980 | tt_query_size + tt_len); | ||
981 | tt_response->ttvn = ttvn; | ||
982 | tt_response->tt_data = htons(tt_tot); | ||
983 | |||
984 | tt_change = (struct tt_change *)(skb->data + tt_query_size); | ||
985 | tt_count = 0; | ||
986 | |||
987 | rcu_read_lock(); | ||
988 | for (i = 0; i < hash->size; i++) { | ||
989 | head = &hash->table[i]; | ||
990 | |||
991 | hlist_for_each_entry_rcu(tt_local_entry, node, | ||
992 | head, hash_entry) { | ||
993 | if (tt_count == tt_tot) | ||
994 | break; | ||
995 | |||
996 | if ((valid_cb) && (!valid_cb(tt_local_entry, cb_data))) | ||
997 | continue; | ||
998 | |||
999 | memcpy(tt_change->addr, tt_local_entry->addr, ETH_ALEN); | ||
1000 | tt_change->flags = NO_FLAGS; | ||
1001 | |||
1002 | tt_count++; | ||
1003 | tt_change++; | ||
1004 | } | ||
1005 | } | ||
1006 | rcu_read_unlock(); | ||
1007 | |||
1008 | out: | ||
1009 | return skb; | ||
1010 | } | ||
1011 | |||
1012 | int send_tt_request(struct bat_priv *bat_priv, struct orig_node *dst_orig_node, | ||
1013 | uint8_t ttvn, uint16_t tt_crc, bool full_table) | ||
1014 | { | ||
1015 | struct sk_buff *skb = NULL; | ||
1016 | struct tt_query_packet *tt_request; | ||
1017 | struct neigh_node *neigh_node = NULL; | ||
1018 | struct hard_iface *primary_if; | ||
1019 | struct tt_req_node *tt_req_node = NULL; | ||
1020 | int ret = 1; | ||
1021 | |||
1022 | primary_if = primary_if_get_selected(bat_priv); | ||
1023 | if (!primary_if) | ||
1024 | goto out; | ||
1025 | |||
1026 | /* The new tt_req will be issued only if I'm not waiting for a | ||
1027 | * reply from the same orig_node yet */ | ||
1028 | tt_req_node = new_tt_req_node(bat_priv, dst_orig_node); | ||
1029 | if (!tt_req_node) | ||
1030 | goto out; | ||
1031 | |||
1032 | skb = dev_alloc_skb(sizeof(struct tt_query_packet) + ETH_HLEN); | ||
1033 | if (!skb) | ||
1034 | goto out; | ||
1035 | |||
1036 | skb_reserve(skb, ETH_HLEN); | ||
1037 | |||
1038 | tt_request = (struct tt_query_packet *)skb_put(skb, | ||
1039 | sizeof(struct tt_query_packet)); | ||
1040 | |||
1041 | tt_request->packet_type = BAT_TT_QUERY; | ||
1042 | tt_request->version = COMPAT_VERSION; | ||
1043 | memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN); | ||
1044 | memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN); | ||
1045 | tt_request->ttl = TTL; | ||
1046 | tt_request->ttvn = ttvn; | ||
1047 | tt_request->tt_data = tt_crc; | ||
1048 | tt_request->flags = TT_REQUEST; | ||
1049 | |||
1050 | if (full_table) | ||
1051 | tt_request->flags |= TT_FULL_TABLE; | ||
1052 | |||
1053 | neigh_node = orig_node_get_router(dst_orig_node); | ||
1054 | if (!neigh_node) | ||
1055 | goto out; | ||
1056 | |||
1057 | bat_dbg(DBG_TT, bat_priv, "Sending TT_REQUEST to %pM via %pM " | ||
1058 | "[%c]\n", dst_orig_node->orig, neigh_node->addr, | ||
1059 | (full_table ? 'F' : '.')); | ||
1060 | |||
1061 | send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); | ||
1062 | ret = 0; | ||
1063 | |||
1064 | out: | ||
1065 | if (neigh_node) | ||
1066 | neigh_node_free_ref(neigh_node); | ||
1067 | if (primary_if) | ||
1068 | hardif_free_ref(primary_if); | ||
1069 | if (ret) | ||
1070 | kfree_skb(skb); | ||
1071 | if (ret && tt_req_node) { | ||
1072 | spin_lock_bh(&bat_priv->tt_req_list_lock); | ||
1073 | list_del(&tt_req_node->list); | ||
1074 | spin_unlock_bh(&bat_priv->tt_req_list_lock); | ||
1075 | kfree(tt_req_node); | ||
1076 | } | ||
1077 | return ret; | ||
1078 | } | ||
1079 | |||
1080 | static bool send_other_tt_response(struct bat_priv *bat_priv, | ||
1081 | struct tt_query_packet *tt_request) | ||
1082 | { | ||
1083 | struct orig_node *req_dst_orig_node = NULL, *res_dst_orig_node = NULL; | ||
1084 | struct neigh_node *neigh_node = NULL; | ||
1085 | struct hard_iface *primary_if = NULL; | ||
1086 | uint8_t orig_ttvn, req_ttvn, ttvn; | ||
1087 | int ret = false; | ||
1088 | unsigned char *tt_buff; | ||
1089 | bool full_table; | ||
1090 | uint16_t tt_len, tt_tot; | ||
1091 | struct sk_buff *skb = NULL; | ||
1092 | struct tt_query_packet *tt_response; | ||
1093 | |||
1094 | bat_dbg(DBG_TT, bat_priv, | ||
1095 | "Received TT_REQUEST from %pM for " | ||
1096 | "ttvn: %u (%pM) [%c]\n", tt_request->src, | ||
1097 | tt_request->ttvn, tt_request->dst, | ||
1098 | (tt_request->flags & TT_FULL_TABLE ? 'F' : '.')); | ||
1099 | |||
1100 | /* Let's get the orig node of the REAL destination */ | ||
1101 | req_dst_orig_node = get_orig_node(bat_priv, tt_request->dst); | ||
1102 | if (!req_dst_orig_node) | ||
1103 | goto out; | ||
1104 | |||
1105 | res_dst_orig_node = get_orig_node(bat_priv, tt_request->src); | ||
1106 | if (!res_dst_orig_node) | ||
1107 | goto out; | ||
1108 | |||
1109 | neigh_node = orig_node_get_router(res_dst_orig_node); | ||
1110 | if (!neigh_node) | ||
1111 | goto out; | ||
1112 | |||
1113 | primary_if = primary_if_get_selected(bat_priv); | ||
1114 | if (!primary_if) | ||
1115 | goto out; | ||
1116 | |||
1117 | orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn); | ||
1118 | req_ttvn = tt_request->ttvn; | ||
1119 | |||
1120 | /* I have not the requested data */ | ||
1121 | if (orig_ttvn != req_ttvn || | ||
1122 | tt_request->tt_data != req_dst_orig_node->tt_crc) | ||
1123 | goto out; | ||
1124 | |||
1125 | /* If it has explicitly been requested the full table */ | ||
1126 | if (tt_request->flags & TT_FULL_TABLE || | ||
1127 | !req_dst_orig_node->tt_buff) | ||
1128 | full_table = true; | ||
1129 | else | ||
1130 | full_table = false; | ||
1131 | |||
1132 | /* In this version, fragmentation is not implemented, then | ||
1133 | * I'll send only one packet with as much TT entries as I can */ | ||
1134 | if (!full_table) { | ||
1135 | spin_lock_bh(&req_dst_orig_node->tt_buff_lock); | ||
1136 | tt_len = req_dst_orig_node->tt_buff_len; | ||
1137 | tt_tot = tt_len / sizeof(struct tt_change); | ||
1138 | |||
1139 | skb = dev_alloc_skb(sizeof(struct tt_query_packet) + | ||
1140 | tt_len + ETH_HLEN); | ||
1141 | if (!skb) | ||
1142 | goto unlock; | ||
1143 | |||
1144 | skb_reserve(skb, ETH_HLEN); | ||
1145 | tt_response = (struct tt_query_packet *)skb_put(skb, | ||
1146 | sizeof(struct tt_query_packet) + tt_len); | ||
1147 | tt_response->ttvn = req_ttvn; | ||
1148 | tt_response->tt_data = htons(tt_tot); | ||
1149 | |||
1150 | tt_buff = skb->data + sizeof(struct tt_query_packet); | ||
1151 | /* Copy the last orig_node's OGM buffer */ | ||
1152 | memcpy(tt_buff, req_dst_orig_node->tt_buff, | ||
1153 | req_dst_orig_node->tt_buff_len); | ||
1154 | |||
1155 | spin_unlock_bh(&req_dst_orig_node->tt_buff_lock); | ||
1156 | } else { | ||
1157 | tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size) * | ||
1158 | sizeof(struct tt_change); | ||
1159 | ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn); | ||
1160 | |||
1161 | skb = tt_response_fill_table(tt_len, ttvn, | ||
1162 | bat_priv->tt_global_hash, | ||
1163 | primary_if, tt_global_valid_entry, | ||
1164 | req_dst_orig_node); | ||
1165 | if (!skb) | ||
1166 | goto out; | ||
1167 | |||
1168 | tt_response = (struct tt_query_packet *)skb->data; | ||
1169 | } | ||
1170 | |||
1171 | tt_response->packet_type = BAT_TT_QUERY; | ||
1172 | tt_response->version = COMPAT_VERSION; | ||
1173 | tt_response->ttl = TTL; | ||
1174 | memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN); | ||
1175 | memcpy(tt_response->dst, tt_request->src, ETH_ALEN); | ||
1176 | tt_response->flags = TT_RESPONSE; | ||
1177 | |||
1178 | if (full_table) | ||
1179 | tt_response->flags |= TT_FULL_TABLE; | ||
1180 | |||
1181 | bat_dbg(DBG_TT, bat_priv, | ||
1182 | "Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n", | ||
1183 | res_dst_orig_node->orig, neigh_node->addr, | ||
1184 | req_dst_orig_node->orig, req_ttvn); | ||
1185 | |||
1186 | send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); | ||
1187 | ret = true; | ||
1188 | goto out; | ||
1189 | |||
1190 | unlock: | ||
1191 | spin_unlock_bh(&req_dst_orig_node->tt_buff_lock); | ||
1192 | |||
1193 | out: | ||
1194 | if (res_dst_orig_node) | ||
1195 | orig_node_free_ref(res_dst_orig_node); | ||
1196 | if (req_dst_orig_node) | ||
1197 | orig_node_free_ref(req_dst_orig_node); | ||
1198 | if (neigh_node) | ||
1199 | neigh_node_free_ref(neigh_node); | ||
1200 | if (primary_if) | ||
1201 | hardif_free_ref(primary_if); | ||
1202 | if (!ret) | ||
1203 | kfree_skb(skb); | ||
1204 | return ret; | ||
1205 | |||
1206 | } | ||
1207 | static bool send_my_tt_response(struct bat_priv *bat_priv, | ||
1208 | struct tt_query_packet *tt_request) | ||
1209 | { | ||
1210 | struct orig_node *orig_node = NULL; | ||
1211 | struct neigh_node *neigh_node = NULL; | ||
1212 | struct hard_iface *primary_if = NULL; | ||
1213 | uint8_t my_ttvn, req_ttvn, ttvn; | ||
1214 | int ret = false; | ||
1215 | unsigned char *tt_buff; | ||
1216 | bool full_table; | ||
1217 | uint16_t tt_len, tt_tot; | ||
1218 | struct sk_buff *skb = NULL; | ||
1219 | struct tt_query_packet *tt_response; | ||
1220 | |||
1221 | bat_dbg(DBG_TT, bat_priv, | ||
1222 | "Received TT_REQUEST from %pM for " | ||
1223 | "ttvn: %u (me) [%c]\n", tt_request->src, | ||
1224 | tt_request->ttvn, | ||
1225 | (tt_request->flags & TT_FULL_TABLE ? 'F' : '.')); | ||
1226 | |||
1227 | |||
1228 | my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn); | ||
1229 | req_ttvn = tt_request->ttvn; | ||
1230 | |||
1231 | orig_node = get_orig_node(bat_priv, tt_request->src); | ||
1232 | if (!orig_node) | ||
1233 | goto out; | ||
1234 | |||
1235 | neigh_node = orig_node_get_router(orig_node); | ||
1236 | if (!neigh_node) | ||
1237 | goto out; | ||
1238 | |||
1239 | primary_if = primary_if_get_selected(bat_priv); | ||
1240 | if (!primary_if) | ||
1241 | goto out; | ||
1242 | |||
1243 | /* If the full table has been explicitly requested or the gap | ||
1244 | * is too big send the whole local translation table */ | ||
1245 | if (tt_request->flags & TT_FULL_TABLE || my_ttvn != req_ttvn || | ||
1246 | !bat_priv->tt_buff) | ||
1247 | full_table = true; | ||
1248 | else | ||
1249 | full_table = false; | ||
1250 | |||
1251 | /* In this version, fragmentation is not implemented, then | ||
1252 | * I'll send only one packet with as much TT entries as I can */ | ||
1253 | if (!full_table) { | ||
1254 | spin_lock_bh(&bat_priv->tt_buff_lock); | ||
1255 | tt_len = bat_priv->tt_buff_len; | ||
1256 | tt_tot = tt_len / sizeof(struct tt_change); | ||
1257 | |||
1258 | skb = dev_alloc_skb(sizeof(struct tt_query_packet) + | ||
1259 | tt_len + ETH_HLEN); | ||
1260 | if (!skb) | ||
1261 | goto unlock; | ||
1262 | |||
1263 | skb_reserve(skb, ETH_HLEN); | ||
1264 | tt_response = (struct tt_query_packet *)skb_put(skb, | ||
1265 | sizeof(struct tt_query_packet) + tt_len); | ||
1266 | tt_response->ttvn = req_ttvn; | ||
1267 | tt_response->tt_data = htons(tt_tot); | ||
1268 | |||
1269 | tt_buff = skb->data + sizeof(struct tt_query_packet); | ||
1270 | memcpy(tt_buff, bat_priv->tt_buff, | ||
1271 | bat_priv->tt_buff_len); | ||
1272 | spin_unlock_bh(&bat_priv->tt_buff_lock); | ||
1273 | } else { | ||
1274 | tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt) * | ||
1275 | sizeof(struct tt_change); | ||
1276 | ttvn = (uint8_t)atomic_read(&bat_priv->ttvn); | ||
1277 | |||
1278 | skb = tt_response_fill_table(tt_len, ttvn, | ||
1279 | bat_priv->tt_local_hash, | ||
1280 | primary_if, NULL, NULL); | ||
1281 | if (!skb) | ||
1282 | goto out; | ||
1283 | |||
1284 | tt_response = (struct tt_query_packet *)skb->data; | ||
1285 | } | ||
1286 | |||
1287 | tt_response->packet_type = BAT_TT_QUERY; | ||
1288 | tt_response->version = COMPAT_VERSION; | ||
1289 | tt_response->ttl = TTL; | ||
1290 | memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN); | ||
1291 | memcpy(tt_response->dst, tt_request->src, ETH_ALEN); | ||
1292 | tt_response->flags = TT_RESPONSE; | ||
1293 | |||
1294 | if (full_table) | ||
1295 | tt_response->flags |= TT_FULL_TABLE; | ||
1296 | |||
1297 | bat_dbg(DBG_TT, bat_priv, | ||
1298 | "Sending TT_RESPONSE to %pM via %pM [%c]\n", | ||
1299 | orig_node->orig, neigh_node->addr, | ||
1300 | (tt_response->flags & TT_FULL_TABLE ? 'F' : '.')); | ||
1301 | |||
1302 | send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); | ||
1303 | ret = true; | ||
1304 | goto out; | ||
1305 | |||
1306 | unlock: | ||
1307 | spin_unlock_bh(&bat_priv->tt_buff_lock); | ||
1308 | out: | ||
1309 | if (orig_node) | ||
1310 | orig_node_free_ref(orig_node); | ||
1311 | if (neigh_node) | ||
1312 | neigh_node_free_ref(neigh_node); | ||
1313 | if (primary_if) | ||
1314 | hardif_free_ref(primary_if); | ||
1315 | if (!ret) | ||
1316 | kfree_skb(skb); | ||
1317 | /* This packet was for me, so it doesn't need to be re-routed */ | ||
1318 | return true; | ||
1319 | } | ||
1320 | |||
1321 | bool send_tt_response(struct bat_priv *bat_priv, | ||
1322 | struct tt_query_packet *tt_request) | ||
1323 | { | ||
1324 | if (is_my_mac(tt_request->dst)) | ||
1325 | return send_my_tt_response(bat_priv, tt_request); | ||
1326 | else | ||
1327 | return send_other_tt_response(bat_priv, tt_request); | ||
1328 | } | ||
1329 | |||
1330 | static void _tt_update_changes(struct bat_priv *bat_priv, | ||
1331 | struct orig_node *orig_node, | ||
1332 | struct tt_change *tt_change, | ||
1333 | uint16_t tt_num_changes, uint8_t ttvn) | ||
1334 | { | ||
1335 | int i; | ||
1336 | |||
1337 | for (i = 0; i < tt_num_changes; i++) { | ||
1338 | if ((tt_change + i)->flags & TT_CHANGE_DEL) | ||
1339 | tt_global_del(bat_priv, orig_node, | ||
1340 | (tt_change + i)->addr, | ||
1341 | "tt removed by changes", | ||
1342 | (tt_change + i)->flags & TT_CLIENT_ROAM); | ||
1343 | else | ||
1344 | if (!tt_global_add(bat_priv, orig_node, | ||
1345 | (tt_change + i)->addr, ttvn, false)) | ||
1346 | /* In case of problem while storing a | ||
1347 | * global_entry, we stop the updating | ||
1348 | * procedure without committing the | ||
1349 | * ttvn change. This will avoid to send | ||
1350 | * corrupted data on tt_request | ||
1351 | */ | ||
1352 | return; | ||
1353 | } | ||
1354 | } | ||
1355 | |||
1356 | static void tt_fill_gtable(struct bat_priv *bat_priv, | ||
1357 | struct tt_query_packet *tt_response) | ||
1358 | { | ||
1359 | struct orig_node *orig_node = NULL; | ||
1360 | |||
1361 | orig_node = orig_hash_find(bat_priv, tt_response->src); | ||
1362 | if (!orig_node) | ||
1363 | goto out; | ||
1364 | |||
1365 | /* Purge the old table first.. */ | ||
1366 | tt_global_del_orig(bat_priv, orig_node, "Received full table"); | ||
1367 | |||
1368 | _tt_update_changes(bat_priv, orig_node, | ||
1369 | (struct tt_change *)(tt_response + 1), | ||
1370 | tt_response->tt_data, tt_response->ttvn); | ||
1371 | |||
1372 | spin_lock_bh(&orig_node->tt_buff_lock); | ||
1373 | kfree(orig_node->tt_buff); | ||
1374 | orig_node->tt_buff_len = 0; | ||
1375 | orig_node->tt_buff = NULL; | ||
1376 | spin_unlock_bh(&orig_node->tt_buff_lock); | ||
1377 | |||
1378 | atomic_set(&orig_node->last_ttvn, tt_response->ttvn); | ||
1379 | |||
1380 | out: | ||
1381 | if (orig_node) | ||
1382 | orig_node_free_ref(orig_node); | ||
1383 | } | ||
1384 | |||
1385 | void tt_update_changes(struct bat_priv *bat_priv, struct orig_node *orig_node, | ||
1386 | uint16_t tt_num_changes, uint8_t ttvn, | ||
1387 | struct tt_change *tt_change) | ||
1388 | { | ||
1389 | _tt_update_changes(bat_priv, orig_node, tt_change, tt_num_changes, | ||
1390 | ttvn); | ||
1391 | |||
1392 | tt_save_orig_buffer(bat_priv, orig_node, (unsigned char *)tt_change, | ||
1393 | tt_num_changes); | ||
1394 | atomic_set(&orig_node->last_ttvn, ttvn); | ||
1395 | } | ||
1396 | |||
1397 | bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr) | ||
1398 | { | ||
1399 | struct tt_local_entry *tt_local_entry = NULL; | ||
1400 | bool ret = false; | ||
1401 | |||
1402 | tt_local_entry = tt_local_hash_find(bat_priv, addr); | ||
1403 | if (!tt_local_entry) | ||
1404 | goto out; | ||
1405 | ret = true; | ||
1406 | out: | ||
1407 | if (tt_local_entry) | ||
1408 | tt_local_entry_free_ref(tt_local_entry); | ||
1409 | return ret; | ||
1410 | } | ||
1411 | |||
/* Process an incoming TT_RESPONSE packet: apply the advertised
 * translation table data (either a full table or a diff of changes),
 * remove the now-satisfied entry from the pending tt_requests list and
 * refresh the originator's table CRC and roaming state. */
void handle_tt_response(struct bat_priv *bat_priv,
			struct tt_query_packet *tt_response)
{
	struct tt_req_node *node, *safe;
	struct orig_node *orig_node = NULL;

	bat_dbg(DBG_TT, bat_priv, "Received TT_RESPONSE from %pM for "
		"ttvn %d t_size: %d [%c]\n",
		tt_response->src, tt_response->ttvn,
		tt_response->tt_data,
		(tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));

	orig_node = orig_hash_find(bat_priv, tt_response->src);
	if (!orig_node)
		goto out;

	if (tt_response->flags & TT_FULL_TABLE)
		tt_fill_gtable(bat_priv, tt_response);
	else
		/* the tt_change records follow the query packet header */
		tt_update_changes(bat_priv, orig_node, tt_response->tt_data,
				  tt_response->ttvn,
				  (struct tt_change *)(tt_response + 1));

	/* Delete the tt_req_node from pending tt_requests list */
	spin_lock_bh(&bat_priv->tt_req_list_lock);
	list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
		if (!compare_eth(node->addr, tt_response->src))
			continue;
		list_del(&node->list);
		kfree(node);
	}
	spin_unlock_bh(&bat_priv->tt_req_list_lock);

	/* Recalculate the CRC for this orig_node and store it */
	orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
	/* Roaming phase is over: tables are in sync again. I can
	 * unset the flag */
	orig_node->tt_poss_change = false;
out:
	if (orig_node)
		orig_node_free_ref(orig_node);
}
1454 | |||
/* Set up the local and global translation tables and arm the periodic
 * purge timer. Returns 1 on success, 0 if either hash allocation
 * failed (the caller tears the mesh down in that case). */
int tt_init(struct bat_priv *bat_priv)
{
	if (!tt_local_init(bat_priv) || !tt_global_init(bat_priv))
		return 0;

	tt_start_timer(bat_priv);
	return 1;
}
1467 | |||
1468 | static void tt_roam_list_free(struct bat_priv *bat_priv) | ||
1469 | { | ||
1470 | struct tt_roam_node *node, *safe; | ||
1471 | |||
1472 | spin_lock_bh(&bat_priv->tt_roam_list_lock); | ||
1473 | |||
1474 | list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) { | ||
1475 | list_del(&node->list); | ||
1476 | kfree(node); | ||
1477 | } | ||
1478 | |||
1479 | spin_unlock_bh(&bat_priv->tt_roam_list_lock); | ||
1480 | } | ||
1481 | |||
1482 | static void tt_roam_purge(struct bat_priv *bat_priv) | ||
1483 | { | ||
1484 | struct tt_roam_node *node, *safe; | ||
1485 | |||
1486 | spin_lock_bh(&bat_priv->tt_roam_list_lock); | ||
1487 | list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) { | ||
1488 | if (!is_out_of_time(node->first_time, | ||
1489 | ROAMING_MAX_TIME * 1000)) | ||
1490 | continue; | ||
1491 | |||
1492 | list_del(&node->list); | ||
1493 | kfree(node); | ||
1494 | } | ||
1495 | spin_unlock_bh(&bat_priv->tt_roam_list_lock); | ||
1496 | } | ||
1497 | |||
/* This function checks whether the client already reached the
 * maximum number of possible roaming phases. In this case the ROAMING_ADV
 * will not be sent.
 *
 * returns true if the ROAMING_ADV can be sent, false otherwise */
static bool tt_check_roam_count(struct bat_priv *bat_priv,
				uint8_t *client)
{
	struct tt_roam_node *tt_roam_node;
	bool ret = false;

	spin_lock_bh(&bat_priv->tt_roam_list_lock);
	/* The ROAMING_ADV is allowed only while this client still has
	 * budget left in its current window: at most ROAMING_MAX_COUNT
	 * events per ROAMING_MAX_TIME seconds */
	list_for_each_entry(tt_roam_node, &bat_priv->tt_roam_list, list) {
		if (!compare_eth(tt_roam_node->addr, client))
			continue;

		/* stale entry: its window expired (tt_roam_purge() will
		 * reap it later); fall through and start a new window */
		if (is_out_of_time(tt_roam_node->first_time,
				   ROAMING_MAX_TIME * 1000))
			continue;

		if (!atomic_dec_not_zero(&tt_roam_node->counter))
			/* Sorry, you roamed too many times! */
			goto unlock;
		ret = true;
		break;
	}

	if (!ret) {
		/* first roaming event for this client in the current
		 * window: track it with a fresh node. GFP_ATOMIC because
		 * we hold a spinlock with bottom halves disabled */
		tt_roam_node = kmalloc(sizeof(*tt_roam_node), GFP_ATOMIC);
		if (!tt_roam_node)
			goto unlock;

		tt_roam_node->first_time = jiffies;
		/* counter starts at MAX-1: this call consumes one event */
		atomic_set(&tt_roam_node->counter, ROAMING_MAX_COUNT - 1);
		memcpy(tt_roam_node->addr, client, ETH_ALEN);

		list_add(&tt_roam_node->list, &bat_priv->tt_roam_list);
		ret = true;
	}

unlock:
	spin_unlock_bh(&bat_priv->tt_roam_list_lock);
	return ret;
}
1544 | |||
1545 | void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client, | ||
1546 | struct orig_node *orig_node) | ||
1547 | { | ||
1548 | struct neigh_node *neigh_node = NULL; | ||
1549 | struct sk_buff *skb = NULL; | ||
1550 | struct roam_adv_packet *roam_adv_packet; | ||
1551 | int ret = 1; | ||
1552 | struct hard_iface *primary_if; | ||
1553 | |||
1554 | /* before going on we have to check whether the client has | ||
1555 | * already roamed to us too many times */ | ||
1556 | if (!tt_check_roam_count(bat_priv, client)) | ||
1557 | goto out; | ||
1558 | |||
1559 | skb = dev_alloc_skb(sizeof(struct roam_adv_packet) + ETH_HLEN); | ||
1560 | if (!skb) | ||
1561 | goto out; | ||
1562 | |||
1563 | skb_reserve(skb, ETH_HLEN); | ||
1564 | |||
1565 | roam_adv_packet = (struct roam_adv_packet *)skb_put(skb, | ||
1566 | sizeof(struct roam_adv_packet)); | ||
1567 | |||
1568 | roam_adv_packet->packet_type = BAT_ROAM_ADV; | ||
1569 | roam_adv_packet->version = COMPAT_VERSION; | ||
1570 | roam_adv_packet->ttl = TTL; | ||
1571 | primary_if = primary_if_get_selected(bat_priv); | ||
1572 | if (!primary_if) | ||
1573 | goto out; | ||
1574 | memcpy(roam_adv_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN); | ||
1575 | hardif_free_ref(primary_if); | ||
1576 | memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN); | ||
1577 | memcpy(roam_adv_packet->client, client, ETH_ALEN); | ||
1578 | |||
1579 | neigh_node = orig_node_get_router(orig_node); | ||
1580 | if (!neigh_node) | ||
1581 | goto out; | ||
1582 | |||
1583 | bat_dbg(DBG_TT, bat_priv, | ||
1584 | "Sending ROAMING_ADV to %pM (client %pM) via %pM\n", | ||
1585 | orig_node->orig, client, neigh_node->addr); | ||
1586 | |||
1587 | send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); | ||
1588 | ret = 0; | ||
1589 | |||
1590 | out: | ||
1591 | if (neigh_node) | ||
1592 | neigh_node_free_ref(neigh_node); | ||
1593 | if (ret) | ||
1594 | kfree_skb(skb); | ||
1595 | return; | ||
1596 | } | ||
1597 | |||
1598 | static void tt_purge(struct work_struct *work) | ||
1599 | { | ||
1600 | struct delayed_work *delayed_work = | ||
1601 | container_of(work, struct delayed_work, work); | ||
1602 | struct bat_priv *bat_priv = | ||
1603 | container_of(delayed_work, struct bat_priv, tt_work); | ||
1604 | |||
1605 | tt_local_purge(bat_priv); | ||
1606 | tt_global_roam_purge(bat_priv); | ||
1607 | tt_req_purge(bat_priv); | ||
1608 | tt_roam_purge(bat_priv); | ||
1609 | |||
1610 | tt_start_timer(bat_priv); | ||
1611 | } | ||
1612 | |||
/* Tear down all translation table state. The delayed work is cancelled
 * synchronously FIRST so that no tt_purge() can run concurrently with,
 * or after, the frees below. */
void tt_free(struct bat_priv *bat_priv)
{
	cancel_delayed_work_sync(&bat_priv->tt_work);

	tt_local_table_free(bat_priv);
	tt_global_table_free(bat_priv);
	tt_req_list_free(bat_priv);
	tt_changes_list_free(bat_priv);
	tt_roam_list_free(bat_priv);

	/* kfree(NULL) is a no-op, so an unallocated buffer is fine here */
	kfree(bat_priv->tt_buff);
}
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h index 0f2b9905cfc4..1cd2d39529fe 100644 --- a/net/batman-adv/translation-table.h +++ b/net/batman-adv/translation-table.h | |||
@@ -22,23 +22,45 @@ | |||
22 | #ifndef _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ | 22 | #ifndef _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ |
23 | #define _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ | 23 | #define _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ |
24 | 24 | ||
25 | int tt_local_init(struct bat_priv *bat_priv); | 25 | int tt_len(int changes_num); |
26 | int tt_changes_fill_buffer(struct bat_priv *bat_priv, | ||
27 | unsigned char *buff, int buff_len); | ||
28 | int tt_init(struct bat_priv *bat_priv); | ||
26 | void tt_local_add(struct net_device *soft_iface, const uint8_t *addr); | 29 | void tt_local_add(struct net_device *soft_iface, const uint8_t *addr); |
27 | void tt_local_remove(struct bat_priv *bat_priv, | 30 | void tt_local_remove(struct bat_priv *bat_priv, |
28 | const uint8_t *addr, const char *message); | 31 | const uint8_t *addr, const char *message, bool roaming); |
29 | int tt_local_fill_buffer(struct bat_priv *bat_priv, | ||
30 | unsigned char *buff, int buff_len); | ||
31 | int tt_local_seq_print_text(struct seq_file *seq, void *offset); | 32 | int tt_local_seq_print_text(struct seq_file *seq, void *offset); |
32 | void tt_local_free(struct bat_priv *bat_priv); | ||
33 | int tt_global_init(struct bat_priv *bat_priv); | ||
34 | void tt_global_add_orig(struct bat_priv *bat_priv, | 33 | void tt_global_add_orig(struct bat_priv *bat_priv, |
35 | struct orig_node *orig_node, | 34 | struct orig_node *orig_node, |
36 | const unsigned char *tt_buff, int tt_buff_len); | 35 | const unsigned char *tt_buff, int tt_buff_len); |
36 | int tt_global_add(struct bat_priv *bat_priv, | ||
37 | struct orig_node *orig_node, const unsigned char *addr, | ||
38 | uint8_t ttvn, bool roaming); | ||
37 | int tt_global_seq_print_text(struct seq_file *seq, void *offset); | 39 | int tt_global_seq_print_text(struct seq_file *seq, void *offset); |
38 | void tt_global_del_orig(struct bat_priv *bat_priv, | 40 | void tt_global_del_orig(struct bat_priv *bat_priv, |
39 | struct orig_node *orig_node, const char *message); | 41 | struct orig_node *orig_node, const char *message); |
40 | void tt_global_free(struct bat_priv *bat_priv); | 42 | void tt_global_del(struct bat_priv *bat_priv, |
43 | struct orig_node *orig_node, const unsigned char *addr, | ||
44 | const char *message, bool roaming); | ||
41 | struct orig_node *transtable_search(struct bat_priv *bat_priv, | 45 | struct orig_node *transtable_search(struct bat_priv *bat_priv, |
42 | const uint8_t *addr); | 46 | const uint8_t *addr); |
47 | void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node, | ||
48 | const unsigned char *tt_buff, uint8_t tt_num_changes); | ||
49 | uint16_t tt_local_crc(struct bat_priv *bat_priv); | ||
50 | uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node); | ||
51 | void tt_free(struct bat_priv *bat_priv); | ||
52 | int send_tt_request(struct bat_priv *bat_priv, | ||
53 | struct orig_node *dst_orig_node, uint8_t hvn, | ||
54 | uint16_t tt_crc, bool full_table); | ||
55 | bool send_tt_response(struct bat_priv *bat_priv, | ||
56 | struct tt_query_packet *tt_request); | ||
57 | void tt_update_changes(struct bat_priv *bat_priv, struct orig_node *orig_node, | ||
58 | uint16_t tt_num_changes, uint8_t ttvn, | ||
59 | struct tt_change *tt_change); | ||
60 | bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr); | ||
61 | void handle_tt_response(struct bat_priv *bat_priv, | ||
62 | struct tt_query_packet *tt_response); | ||
63 | void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client, | ||
64 | struct orig_node *orig_node); | ||
43 | 65 | ||
44 | #endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */ | 66 | #endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */ |
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index 65b32223d104..85cf1224881e 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h | |||
@@ -75,8 +75,18 @@ struct orig_node { | |||
75 | unsigned long batman_seqno_reset; | 75 | unsigned long batman_seqno_reset; |
76 | uint8_t gw_flags; | 76 | uint8_t gw_flags; |
77 | uint8_t flags; | 77 | uint8_t flags; |
78 | atomic_t last_ttvn; /* last seen translation table version number */ | ||
79 | uint16_t tt_crc; | ||
78 | unsigned char *tt_buff; | 80 | unsigned char *tt_buff; |
79 | int16_t tt_buff_len; | 81 | int16_t tt_buff_len; |
82 | spinlock_t tt_buff_lock; /* protects tt_buff */ | ||
83 | atomic_t tt_size; | ||
84 | /* The tt_poss_change flag is used to detect an ongoing roaming phase. | ||
85 | * If true, then I sent a Roaming_adv to this orig_node and I have to | ||
86 | * inspect every packet directed to it to check whether it is still | ||
87 | * the true destination or not. This flag will be reset to false as | ||
88 | * soon as I receive a new TTVN from this orig_node */ | ||
89 | bool tt_poss_change; | ||
80 | uint32_t last_real_seqno; | 90 | uint32_t last_real_seqno; |
81 | uint8_t last_ttl; | 91 | uint8_t last_ttl; |
82 | unsigned long bcast_bits[NUM_WORDS]; | 92 | unsigned long bcast_bits[NUM_WORDS]; |
@@ -94,6 +104,7 @@ struct orig_node { | |||
94 | spinlock_t ogm_cnt_lock; | 104 | spinlock_t ogm_cnt_lock; |
95 | /* bcast_seqno_lock protects bcast_bits, last_bcast_seqno */ | 105 | /* bcast_seqno_lock protects bcast_bits, last_bcast_seqno */ |
96 | spinlock_t bcast_seqno_lock; | 106 | spinlock_t bcast_seqno_lock; |
107 | spinlock_t tt_list_lock; /* protects tt_list */ | ||
97 | atomic_t bond_candidates; | 108 | atomic_t bond_candidates; |
98 | struct list_head bond_list; | 109 | struct list_head bond_list; |
99 | }; | 110 | }; |
@@ -145,6 +156,15 @@ struct bat_priv { | |||
145 | atomic_t bcast_seqno; | 156 | atomic_t bcast_seqno; |
146 | atomic_t bcast_queue_left; | 157 | atomic_t bcast_queue_left; |
147 | atomic_t batman_queue_left; | 158 | atomic_t batman_queue_left; |
159 | atomic_t ttvn; /* translation table version number */ | ||
160 | atomic_t tt_ogm_append_cnt; | ||
161 | atomic_t tt_local_changes; /* changes registered in a OGM interval */ | ||
162 | /* The tt_poss_change flag is used to detect an ongoing roaming phase. | ||
163 | * If true, then I received a Roaming_adv and I have to inspect every | ||
164 | * packet directed to me to check whether I am still the true | ||
165 | * destination or not. This flag will be reset to false as soon as I | ||
166 | * increase my TTVN */ | ||
167 | bool tt_poss_change; | ||
148 | char num_ifaces; | 168 | char num_ifaces; |
149 | struct debug_log *debug_log; | 169 | struct debug_log *debug_log; |
150 | struct kobject *mesh_obj; | 170 | struct kobject *mesh_obj; |
@@ -153,26 +173,35 @@ struct bat_priv { | |||
153 | struct hlist_head forw_bcast_list; | 173 | struct hlist_head forw_bcast_list; |
154 | struct hlist_head gw_list; | 174 | struct hlist_head gw_list; |
155 | struct hlist_head softif_neigh_vids; | 175 | struct hlist_head softif_neigh_vids; |
176 | struct list_head tt_changes_list; /* tracks changes in a OGM int */ | ||
156 | struct list_head vis_send_list; | 177 | struct list_head vis_send_list; |
157 | struct hashtable_t *orig_hash; | 178 | struct hashtable_t *orig_hash; |
158 | struct hashtable_t *tt_local_hash; | 179 | struct hashtable_t *tt_local_hash; |
159 | struct hashtable_t *tt_global_hash; | 180 | struct hashtable_t *tt_global_hash; |
181 | struct list_head tt_req_list; /* list of pending tt_requests */ | ||
182 | struct list_head tt_roam_list; | ||
160 | struct hashtable_t *vis_hash; | 183 | struct hashtable_t *vis_hash; |
161 | spinlock_t forw_bat_list_lock; /* protects forw_bat_list */ | 184 | spinlock_t forw_bat_list_lock; /* protects forw_bat_list */ |
162 | spinlock_t forw_bcast_list_lock; /* protects */ | 185 | spinlock_t forw_bcast_list_lock; /* protects */ |
163 | spinlock_t tt_lhash_lock; /* protects tt_local_hash */ | 186 | spinlock_t tt_changes_list_lock; /* protects tt_changes */ |
164 | spinlock_t tt_ghash_lock; /* protects tt_global_hash */ | 187 | spinlock_t tt_req_list_lock; /* protects tt_req_list */ |
188 | spinlock_t tt_roam_list_lock; /* protects tt_roam_list */ | ||
165 | spinlock_t gw_list_lock; /* protects gw_list and curr_gw */ | 189 | spinlock_t gw_list_lock; /* protects gw_list and curr_gw */ |
166 | spinlock_t vis_hash_lock; /* protects vis_hash */ | 190 | spinlock_t vis_hash_lock; /* protects vis_hash */ |
167 | spinlock_t vis_list_lock; /* protects vis_info::recv_list */ | 191 | spinlock_t vis_list_lock; /* protects vis_info::recv_list */ |
168 | spinlock_t softif_neigh_lock; /* protects soft-interface neigh list */ | 192 | spinlock_t softif_neigh_lock; /* protects soft-interface neigh list */ |
169 | spinlock_t softif_neigh_vid_lock; /* protects soft-interface vid list */ | 193 | spinlock_t softif_neigh_vid_lock; /* protects soft-interface vid list */ |
170 | int16_t num_local_tt; | 194 | atomic_t num_local_tt; |
171 | atomic_t tt_local_changed; | 195 | /* Checksum of the local table, recomputed before sending a new OGM */ |
196 | atomic_t tt_crc; | ||
197 | unsigned char *tt_buff; | ||
198 | int16_t tt_buff_len; | ||
199 | spinlock_t tt_buff_lock; /* protects tt_buff */ | ||
172 | struct delayed_work tt_work; | 200 | struct delayed_work tt_work; |
173 | struct delayed_work orig_work; | 201 | struct delayed_work orig_work; |
174 | struct delayed_work vis_work; | 202 | struct delayed_work vis_work; |
175 | struct gw_node __rcu *curr_gw; /* rcu protected pointer */ | 203 | struct gw_node __rcu *curr_gw; /* rcu protected pointer */ |
204 | atomic_t gw_reselect; | ||
176 | struct hard_iface __rcu *primary_if; /* rcu protected pointer */ | 205 | struct hard_iface __rcu *primary_if; /* rcu protected pointer */ |
177 | struct vis_info *my_vis_info; | 206 | struct vis_info *my_vis_info; |
178 | }; | 207 | }; |
@@ -196,13 +225,38 @@ struct tt_local_entry { | |||
196 | uint8_t addr[ETH_ALEN]; | 225 | uint8_t addr[ETH_ALEN]; |
197 | unsigned long last_seen; | 226 | unsigned long last_seen; |
198 | char never_purge; | 227 | char never_purge; |
228 | atomic_t refcount; | ||
229 | struct rcu_head rcu; | ||
199 | struct hlist_node hash_entry; | 230 | struct hlist_node hash_entry; |
200 | }; | 231 | }; |
201 | 232 | ||
202 | struct tt_global_entry { | 233 | struct tt_global_entry { |
203 | uint8_t addr[ETH_ALEN]; | 234 | uint8_t addr[ETH_ALEN]; |
204 | struct orig_node *orig_node; | 235 | struct orig_node *orig_node; |
205 | struct hlist_node hash_entry; | 236 | uint8_t ttvn; |
237 | uint8_t flags; /* only TT_GLOBAL_ROAM is used */ | ||
238 | unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */ | ||
239 | atomic_t refcount; | ||
240 | struct rcu_head rcu; | ||
241 | struct hlist_node hash_entry; /* entry in the global table */ | ||
242 | }; | ||
243 | |||
244 | struct tt_change_node { | ||
245 | struct list_head list; | ||
246 | struct tt_change change; | ||
247 | }; | ||
248 | |||
249 | struct tt_req_node { | ||
250 | uint8_t addr[ETH_ALEN]; | ||
251 | unsigned long issued_at; | ||
252 | struct list_head list; | ||
253 | }; | ||
254 | |||
255 | struct tt_roam_node { | ||
256 | uint8_t addr[ETH_ALEN]; | ||
257 | atomic_t counter; | ||
258 | unsigned long first_time; | ||
259 | struct list_head list; | ||
206 | }; | 260 | }; |
207 | 261 | ||
208 | /** | 262 | /** |
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c index 6eabf42f8822..32b125fb3d3b 100644 --- a/net/batman-adv/unicast.c +++ b/net/batman-adv/unicast.c | |||
@@ -325,6 +325,9 @@ find_router: | |||
325 | unicast_packet->ttl = TTL; | 325 | unicast_packet->ttl = TTL; |
326 | /* copy the destination for faster routing */ | 326 | /* copy the destination for faster routing */ |
327 | memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN); | 327 | memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN); |
328 | /* set the destination tt version number */ | ||
329 | unicast_packet->ttvn = | ||
330 | (uint8_t)atomic_read(&orig_node->last_ttvn); | ||
328 | 331 | ||
329 | if (atomic_read(&bat_priv->fragmentation) && | 332 | if (atomic_read(&bat_priv->fragmentation) && |
330 | data_len + sizeof(*unicast_packet) > | 333 | data_len + sizeof(*unicast_packet) > |
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c index 355c6e590b0c..8a1b98589d76 100644 --- a/net/batman-adv/vis.c +++ b/net/batman-adv/vis.c | |||
@@ -665,11 +665,12 @@ next: | |||
665 | 665 | ||
666 | hash = bat_priv->tt_local_hash; | 666 | hash = bat_priv->tt_local_hash; |
667 | 667 | ||
668 | spin_lock_bh(&bat_priv->tt_lhash_lock); | ||
669 | for (i = 0; i < hash->size; i++) { | 668 | for (i = 0; i < hash->size; i++) { |
670 | head = &hash->table[i]; | 669 | head = &hash->table[i]; |
671 | 670 | ||
672 | hlist_for_each_entry(tt_local_entry, node, head, hash_entry) { | 671 | rcu_read_lock(); |
672 | hlist_for_each_entry_rcu(tt_local_entry, node, head, | ||
673 | hash_entry) { | ||
673 | entry = (struct vis_info_entry *) | 674 | entry = (struct vis_info_entry *) |
674 | skb_put(info->skb_packet, | 675 | skb_put(info->skb_packet, |
675 | sizeof(*entry)); | 676 | sizeof(*entry)); |
@@ -678,14 +679,12 @@ next: | |||
678 | entry->quality = 0; /* 0 means TT */ | 679 | entry->quality = 0; /* 0 means TT */ |
679 | packet->entries++; | 680 | packet->entries++; |
680 | 681 | ||
681 | if (vis_packet_full(info)) { | 682 | if (vis_packet_full(info)) |
682 | spin_unlock_bh(&bat_priv->tt_lhash_lock); | 683 | goto unlock; |
683 | return 0; | ||
684 | } | ||
685 | } | 684 | } |
685 | rcu_read_unlock(); | ||
686 | } | 686 | } |
687 | 687 | ||
688 | spin_unlock_bh(&bat_priv->tt_lhash_lock); | ||
689 | return 0; | 688 | return 0; |
690 | 689 | ||
691 | unlock: | 690 | unlock: |