author     David S. Miller <davem@davemloft.net>    2011-05-08 18:39:11 -0400
committer  David S. Miller <davem@davemloft.net>    2011-05-08 18:39:11 -0400
commit     02e73c1edc3746e308d1768a27fdc8121f641ab1 (patch)
tree       a3db8009e4549e3d85905e11ac2bf8e64df1763f /net/batman-adv
parent     c5216cc70fa769e5a51837f2cf07c4a0aa734fcf (diff)
parent     27aea2128ec09924dfe08e97739b2bf8b15c8619 (diff)
Merge branch 'batman-adv/next' of git://git.open-mesh.org/ecsv/linux-merge
Diffstat (limited to 'net/batman-adv')
-rw-r--r--  net/batman-adv/aggregation.c        |  16
-rw-r--r--  net/batman-adv/aggregation.h        |   4
-rw-r--r--  net/batman-adv/bat_debugfs.c        |   4
-rw-r--r--  net/batman-adv/bat_sysfs.c          |  16
-rw-r--r--  net/batman-adv/hard-interface.c     |  36
-rw-r--r--  net/batman-adv/main.c               |  20
-rw-r--r--  net/batman-adv/main.h               |  42
-rw-r--r--  net/batman-adv/originator.c         |  10
-rw-r--r--  net/batman-adv/packet.h             |   5
-rw-r--r--  net/batman-adv/routing.c            | 162
-rw-r--r--  net/batman-adv/routing.h            |   6
-rw-r--r--  net/batman-adv/send.c               |  16
-rw-r--r--  net/batman-adv/send.h               |   2
-rw-r--r--  net/batman-adv/soft-interface.c     | 409
-rw-r--r--  net/batman-adv/translation-table.c  | 417
-rw-r--r--  net/batman-adv/translation-table.h  |  24
-rw-r--r--  net/batman-adv/types.h              |  49
-rw-r--r--  net/batman-adv/unicast.c            |   2
-rw-r--r--  net/batman-adv/vis.c                |  18

19 files changed, 706 insertions(+), 552 deletions(-)
diff --git a/net/batman-adv/aggregation.c b/net/batman-adv/aggregation.c
index c11788c4c1a1..9b9459024479 100644
--- a/net/batman-adv/aggregation.c
+++ b/net/batman-adv/aggregation.c
@@ -24,10 +24,10 @@ | |||
24 | #include "send.h" | 24 | #include "send.h" |
25 | #include "routing.h" | 25 | #include "routing.h" |
26 | 26 | ||
27 | /* calculate the size of the hna information for a given packet */ | 27 | /* calculate the size of the tt information for a given packet */ |
28 | static int hna_len(struct batman_packet *batman_packet) | 28 | static int tt_len(struct batman_packet *batman_packet) |
29 | { | 29 | { |
30 | return batman_packet->num_hna * ETH_ALEN; | 30 | return batman_packet->num_tt * ETH_ALEN; |
31 | } | 31 | } |
32 | 32 | ||
33 | /* return true if new_packet can be aggregated with forw_packet */ | 33 | /* return true if new_packet can be aggregated with forw_packet */ |
@@ -250,7 +250,7 @@ void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff, | |||
250 | { | 250 | { |
251 | struct batman_packet *batman_packet; | 251 | struct batman_packet *batman_packet; |
252 | int buff_pos = 0; | 252 | int buff_pos = 0; |
253 | unsigned char *hna_buff; | 253 | unsigned char *tt_buff; |
254 | 254 | ||
255 | batman_packet = (struct batman_packet *)packet_buff; | 255 | batman_packet = (struct batman_packet *)packet_buff; |
256 | 256 | ||
@@ -259,14 +259,14 @@ void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff, | |||
259 | orig_interval. */ | 259 | orig_interval. */ |
260 | batman_packet->seqno = ntohl(batman_packet->seqno); | 260 | batman_packet->seqno = ntohl(batman_packet->seqno); |
261 | 261 | ||
262 | hna_buff = packet_buff + buff_pos + BAT_PACKET_LEN; | 262 | tt_buff = packet_buff + buff_pos + BAT_PACKET_LEN; |
263 | receive_bat_packet(ethhdr, batman_packet, | 263 | receive_bat_packet(ethhdr, batman_packet, |
264 | hna_buff, hna_len(batman_packet), | 264 | tt_buff, tt_len(batman_packet), |
265 | if_incoming); | 265 | if_incoming); |
266 | 266 | ||
267 | buff_pos += BAT_PACKET_LEN + hna_len(batman_packet); | 267 | buff_pos += BAT_PACKET_LEN + tt_len(batman_packet); |
268 | batman_packet = (struct batman_packet *) | 268 | batman_packet = (struct batman_packet *) |
269 | (packet_buff + buff_pos); | 269 | (packet_buff + buff_pos); |
270 | } while (aggregated_packet(buff_pos, packet_len, | 270 | } while (aggregated_packet(buff_pos, packet_len, |
271 | batman_packet->num_hna)); | 271 | batman_packet->num_tt)); |
272 | } | 272 | } |
diff --git a/net/batman-adv/aggregation.h b/net/batman-adv/aggregation.h
index 062204289d1f..7e6d72fbf540 100644
--- a/net/batman-adv/aggregation.h
+++ b/net/batman-adv/aggregation.h
@@ -25,9 +25,9 @@ | |||
25 | #include "main.h" | 25 | #include "main.h" |
26 | 26 | ||
27 | /* is there another aggregated packet here? */ | 27 | /* is there another aggregated packet here? */ |
28 | static inline int aggregated_packet(int buff_pos, int packet_len, int num_hna) | 28 | static inline int aggregated_packet(int buff_pos, int packet_len, int num_tt) |
29 | { | 29 | { |
30 | int next_buff_pos = buff_pos + BAT_PACKET_LEN + (num_hna * ETH_ALEN); | 30 | int next_buff_pos = buff_pos + BAT_PACKET_LEN + (num_tt * ETH_ALEN); |
31 | 31 | ||
32 | return (next_buff_pos <= packet_len) && | 32 | return (next_buff_pos <= packet_len) && |
33 | (next_buff_pos <= MAX_AGGREGATION_BYTES); | 33 | (next_buff_pos <= MAX_AGGREGATION_BYTES); |
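As a rough worked example of the bound checked by aggregated_packet() above (ETH_ALEN is 6 bytes, BAT_PACKET_LEN is the fixed OGM header size): an aggregated OGM carrying num_tt = 3 translation-table entries advances next_buff_pos by BAT_PACKET_LEN + 3 * 6 bytes, and the walk in receive_aggr_bat_packet() only continues while that running offset stays within both packet_len and MAX_AGGREGATION_BYTES (512).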
diff --git a/net/batman-adv/bat_debugfs.c b/net/batman-adv/bat_debugfs.c
index 0e9d43509935..abaeec5f6247 100644
--- a/net/batman-adv/bat_debugfs.c
+++ b/net/batman-adv/bat_debugfs.c
@@ -241,13 +241,13 @@ static int softif_neigh_open(struct inode *inode, struct file *file) | |||
241 | static int transtable_global_open(struct inode *inode, struct file *file) | 241 | static int transtable_global_open(struct inode *inode, struct file *file) |
242 | { | 242 | { |
243 | struct net_device *net_dev = (struct net_device *)inode->i_private; | 243 | struct net_device *net_dev = (struct net_device *)inode->i_private; |
244 | return single_open(file, hna_global_seq_print_text, net_dev); | 244 | return single_open(file, tt_global_seq_print_text, net_dev); |
245 | } | 245 | } |
246 | 246 | ||
247 | static int transtable_local_open(struct inode *inode, struct file *file) | 247 | static int transtable_local_open(struct inode *inode, struct file *file) |
248 | { | 248 | { |
249 | struct net_device *net_dev = (struct net_device *)inode->i_private; | 249 | struct net_device *net_dev = (struct net_device *)inode->i_private; |
250 | return single_open(file, hna_local_seq_print_text, net_dev); | 250 | return single_open(file, tt_local_seq_print_text, net_dev); |
251 | } | 251 | } |
252 | 252 | ||
253 | static int vis_data_open(struct inode *inode, struct file *file) | 253 | static int vis_data_open(struct inode *inode, struct file *file) |
diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c
index e449bf6353e0..497a0700cc3c 100644
--- a/net/batman-adv/bat_sysfs.c
+++ b/net/batman-adv/bat_sysfs.c
@@ -488,22 +488,24 @@ static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr, | |||
488 | (strncmp(hard_iface->soft_iface->name, buff, IFNAMSIZ) == 0)) | 488 | (strncmp(hard_iface->soft_iface->name, buff, IFNAMSIZ) == 0)) |
489 | goto out; | 489 | goto out; |
490 | 490 | ||
491 | if (!rtnl_trylock()) { | ||
492 | ret = -ERESTARTSYS; | ||
493 | goto out; | ||
494 | } | ||
495 | |||
491 | if (status_tmp == IF_NOT_IN_USE) { | 496 | if (status_tmp == IF_NOT_IN_USE) { |
492 | rtnl_lock(); | ||
493 | hardif_disable_interface(hard_iface); | 497 | hardif_disable_interface(hard_iface); |
494 | rtnl_unlock(); | 498 | goto unlock; |
495 | goto out; | ||
496 | } | 499 | } |
497 | 500 | ||
498 | /* if the interface already is in use */ | 501 | /* if the interface already is in use */ |
499 | if (hard_iface->if_status != IF_NOT_IN_USE) { | 502 | if (hard_iface->if_status != IF_NOT_IN_USE) |
500 | rtnl_lock(); | ||
501 | hardif_disable_interface(hard_iface); | 503 | hardif_disable_interface(hard_iface); |
502 | rtnl_unlock(); | ||
503 | } | ||
504 | 504 | ||
505 | ret = hardif_enable_interface(hard_iface, buff); | 505 | ret = hardif_enable_interface(hard_iface, buff); |
506 | 506 | ||
507 | unlock: | ||
508 | rtnl_unlock(); | ||
507 | out: | 509 | out: |
508 | hardif_free_ref(hard_iface); | 510 | hardif_free_ref(hard_iface); |
509 | return ret; | 511 | return ret; |
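The store_mesh_iface() change above replaces the two unconditional rtnl_lock()/rtnl_unlock() pairs with a single rtnl_trylock() that bails out with -ERESTARTSYS when the lock is contended, plus a shared unlock label. A minimal sketch of that pattern, with hypothetical names and the batman-adv specifics left out:

	static ssize_t example_store(struct kobject *kobj, struct attribute *attr,
				     char *buff, size_t count)
	{
		if (!rtnl_trylock())
			return -ERESTARTSYS;	/* let userspace retry the write */

		/* ... reconfigure the interface while holding RTNL ... */

		rtnl_unlock();
		return count;
	}

Trying the lock instead of sleeping on it in a sysfs store handler sidesteps potential lock-ordering problems between RTNL and the locks sysfs itself holds across the write.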
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 3e888f133d75..dfbfccc9fe40 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -31,9 +31,6 @@ | |||
31 | 31 | ||
32 | #include <linux/if_arp.h> | 32 | #include <linux/if_arp.h> |
33 | 33 | ||
34 | /* protect update critical side of hardif_list - but not the content */ | ||
35 | static DEFINE_SPINLOCK(hardif_list_lock); | ||
36 | |||
37 | 34 | ||
38 | static int batman_skb_recv(struct sk_buff *skb, | 35 | static int batman_skb_recv(struct sk_buff *skb, |
39 | struct net_device *dev, | 36 | struct net_device *dev, |
@@ -136,7 +133,7 @@ static void primary_if_select(struct bat_priv *bat_priv, | |||
136 | struct hard_iface *curr_hard_iface; | 133 | struct hard_iface *curr_hard_iface; |
137 | struct batman_packet *batman_packet; | 134 | struct batman_packet *batman_packet; |
138 | 135 | ||
139 | spin_lock_bh(&hardif_list_lock); | 136 | ASSERT_RTNL(); |
140 | 137 | ||
141 | if (new_hard_iface && !atomic_inc_not_zero(&new_hard_iface->refcount)) | 138 | if (new_hard_iface && !atomic_inc_not_zero(&new_hard_iface->refcount)) |
142 | new_hard_iface = NULL; | 139 | new_hard_iface = NULL; |
@@ -148,7 +145,7 @@ static void primary_if_select(struct bat_priv *bat_priv, | |||
148 | hardif_free_ref(curr_hard_iface); | 145 | hardif_free_ref(curr_hard_iface); |
149 | 146 | ||
150 | if (!new_hard_iface) | 147 | if (!new_hard_iface) |
151 | goto out; | 148 | return; |
152 | 149 | ||
153 | batman_packet = (struct batman_packet *)(new_hard_iface->packet_buff); | 150 | batman_packet = (struct batman_packet *)(new_hard_iface->packet_buff); |
154 | batman_packet->flags = PRIMARIES_FIRST_HOP; | 151 | batman_packet->flags = PRIMARIES_FIRST_HOP; |
@@ -157,13 +154,10 @@ static void primary_if_select(struct bat_priv *bat_priv, | |||
157 | primary_if_update_addr(bat_priv); | 154 | primary_if_update_addr(bat_priv); |
158 | 155 | ||
159 | /*** | 156 | /*** |
160 | * hacky trick to make sure that we send the HNA information via | 157 | * hacky trick to make sure that we send the TT information via |
161 | * our new primary interface | 158 | * our new primary interface |
162 | */ | 159 | */ |
163 | atomic_set(&bat_priv->hna_local_changed, 1); | 160 | atomic_set(&bat_priv->tt_local_changed, 1); |
164 | |||
165 | out: | ||
166 | spin_unlock_bh(&hardif_list_lock); | ||
167 | } | 161 | } |
168 | 162 | ||
169 | static bool hardif_is_iface_up(struct hard_iface *hard_iface) | 163 | static bool hardif_is_iface_up(struct hard_iface *hard_iface) |
@@ -345,7 +339,7 @@ int hardif_enable_interface(struct hard_iface *hard_iface, char *iface_name) | |||
345 | batman_packet->flags = 0; | 339 | batman_packet->flags = 0; |
346 | batman_packet->ttl = 2; | 340 | batman_packet->ttl = 2; |
347 | batman_packet->tq = TQ_MAX_VALUE; | 341 | batman_packet->tq = TQ_MAX_VALUE; |
348 | batman_packet->num_hna = 0; | 342 | batman_packet->num_tt = 0; |
349 | 343 | ||
350 | hard_iface->if_num = bat_priv->num_ifaces; | 344 | hard_iface->if_num = bat_priv->num_ifaces; |
351 | bat_priv->num_ifaces++; | 345 | bat_priv->num_ifaces++; |
@@ -456,6 +450,8 @@ static struct hard_iface *hardif_add_interface(struct net_device *net_dev) | |||
456 | struct hard_iface *hard_iface; | 450 | struct hard_iface *hard_iface; |
457 | int ret; | 451 | int ret; |
458 | 452 | ||
453 | ASSERT_RTNL(); | ||
454 | |||
459 | ret = is_valid_iface(net_dev); | 455 | ret = is_valid_iface(net_dev); |
460 | if (ret != 1) | 456 | if (ret != 1) |
461 | goto out; | 457 | goto out; |
@@ -482,10 +478,7 @@ static struct hard_iface *hardif_add_interface(struct net_device *net_dev) | |||
482 | atomic_set(&hard_iface->refcount, 2); | 478 | atomic_set(&hard_iface->refcount, 2); |
483 | 479 | ||
484 | check_known_mac_addr(hard_iface->net_dev); | 480 | check_known_mac_addr(hard_iface->net_dev); |
485 | |||
486 | spin_lock(&hardif_list_lock); | ||
487 | list_add_tail_rcu(&hard_iface->list, &hardif_list); | 481 | list_add_tail_rcu(&hard_iface->list, &hardif_list); |
488 | spin_unlock(&hardif_list_lock); | ||
489 | 482 | ||
490 | return hard_iface; | 483 | return hard_iface; |
491 | 484 | ||
@@ -499,6 +492,8 @@ out: | |||
499 | 492 | ||
500 | static void hardif_remove_interface(struct hard_iface *hard_iface) | 493 | static void hardif_remove_interface(struct hard_iface *hard_iface) |
501 | { | 494 | { |
495 | ASSERT_RTNL(); | ||
496 | |||
502 | /* first deactivate interface */ | 497 | /* first deactivate interface */ |
503 | if (hard_iface->if_status != IF_NOT_IN_USE) | 498 | if (hard_iface->if_status != IF_NOT_IN_USE) |
504 | hardif_disable_interface(hard_iface); | 499 | hardif_disable_interface(hard_iface); |
@@ -514,20 +509,11 @@ static void hardif_remove_interface(struct hard_iface *hard_iface) | |||
514 | void hardif_remove_interfaces(void) | 509 | void hardif_remove_interfaces(void) |
515 | { | 510 | { |
516 | struct hard_iface *hard_iface, *hard_iface_tmp; | 511 | struct hard_iface *hard_iface, *hard_iface_tmp; |
517 | struct list_head if_queue; | ||
518 | |||
519 | INIT_LIST_HEAD(&if_queue); | ||
520 | 512 | ||
521 | spin_lock(&hardif_list_lock); | 513 | rtnl_lock(); |
522 | list_for_each_entry_safe(hard_iface, hard_iface_tmp, | 514 | list_for_each_entry_safe(hard_iface, hard_iface_tmp, |
523 | &hardif_list, list) { | 515 | &hardif_list, list) { |
524 | list_del_rcu(&hard_iface->list); | 516 | list_del_rcu(&hard_iface->list); |
525 | list_add_tail(&hard_iface->list, &if_queue); | ||
526 | } | ||
527 | spin_unlock(&hardif_list_lock); | ||
528 | |||
529 | rtnl_lock(); | ||
530 | list_for_each_entry_safe(hard_iface, hard_iface_tmp, &if_queue, list) { | ||
531 | hardif_remove_interface(hard_iface); | 517 | hardif_remove_interface(hard_iface); |
532 | } | 518 | } |
533 | rtnl_unlock(); | 519 | rtnl_unlock(); |
@@ -556,9 +542,7 @@ static int hard_if_event(struct notifier_block *this, | |||
556 | hardif_deactivate_interface(hard_iface); | 542 | hardif_deactivate_interface(hard_iface); |
557 | break; | 543 | break; |
558 | case NETDEV_UNREGISTER: | 544 | case NETDEV_UNREGISTER: |
559 | spin_lock(&hardif_list_lock); | ||
560 | list_del_rcu(&hard_iface->list); | 545 | list_del_rcu(&hard_iface->list); |
561 | spin_unlock(&hardif_list_lock); | ||
562 | 546 | ||
563 | hardif_remove_interface(hard_iface); | 547 | hardif_remove_interface(hard_iface); |
564 | break; | 548 | break; |
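Taken together with the comment added to main.c below, the hard-interface.c changes above retire the private hardif_list_lock spinlock: hardif_list may now only be modified while the RTNL lock is held (hence the ASSERT_RTNL() calls), while readers keep walking the list under rcu_read_lock(). A minimal sketch of the resulting locking rules (the reader body is only illustrative):

	/* writer side: list manipulation requires RTNL */
	ASSERT_RTNL();
	list_add_tail_rcu(&hard_iface->list, &hardif_list);

	/* reader side: plain RCU is enough for traversal */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		/* ... inspect hard_iface ... */
	}
	rcu_read_unlock();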
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 709b33bbdf43..0a7cee0076f4 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -33,6 +33,9 @@ | |||
33 | #include "vis.h" | 33 | #include "vis.h" |
34 | #include "hash.h" | 34 | #include "hash.h" |
35 | 35 | ||
36 | |||
37 | /* List manipulations on hardif_list have to be rtnl_lock()'ed, | ||
38 | * list traversals just rcu-locked */ | ||
36 | struct list_head hardif_list; | 39 | struct list_head hardif_list; |
37 | 40 | ||
38 | unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; | 41 | unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; |
@@ -81,28 +84,29 @@ int mesh_init(struct net_device *soft_iface) | |||
81 | 84 | ||
82 | spin_lock_init(&bat_priv->forw_bat_list_lock); | 85 | spin_lock_init(&bat_priv->forw_bat_list_lock); |
83 | spin_lock_init(&bat_priv->forw_bcast_list_lock); | 86 | spin_lock_init(&bat_priv->forw_bcast_list_lock); |
84 | spin_lock_init(&bat_priv->hna_lhash_lock); | 87 | spin_lock_init(&bat_priv->tt_lhash_lock); |
85 | spin_lock_init(&bat_priv->hna_ghash_lock); | 88 | spin_lock_init(&bat_priv->tt_ghash_lock); |
86 | spin_lock_init(&bat_priv->gw_list_lock); | 89 | spin_lock_init(&bat_priv->gw_list_lock); |
87 | spin_lock_init(&bat_priv->vis_hash_lock); | 90 | spin_lock_init(&bat_priv->vis_hash_lock); |
88 | spin_lock_init(&bat_priv->vis_list_lock); | 91 | spin_lock_init(&bat_priv->vis_list_lock); |
89 | spin_lock_init(&bat_priv->softif_neigh_lock); | 92 | spin_lock_init(&bat_priv->softif_neigh_lock); |
93 | spin_lock_init(&bat_priv->softif_neigh_vid_lock); | ||
90 | 94 | ||
91 | INIT_HLIST_HEAD(&bat_priv->forw_bat_list); | 95 | INIT_HLIST_HEAD(&bat_priv->forw_bat_list); |
92 | INIT_HLIST_HEAD(&bat_priv->forw_bcast_list); | 96 | INIT_HLIST_HEAD(&bat_priv->forw_bcast_list); |
93 | INIT_HLIST_HEAD(&bat_priv->gw_list); | 97 | INIT_HLIST_HEAD(&bat_priv->gw_list); |
94 | INIT_HLIST_HEAD(&bat_priv->softif_neigh_list); | 98 | INIT_HLIST_HEAD(&bat_priv->softif_neigh_vids); |
95 | 99 | ||
96 | if (originator_init(bat_priv) < 1) | 100 | if (originator_init(bat_priv) < 1) |
97 | goto err; | 101 | goto err; |
98 | 102 | ||
99 | if (hna_local_init(bat_priv) < 1) | 103 | if (tt_local_init(bat_priv) < 1) |
100 | goto err; | 104 | goto err; |
101 | 105 | ||
102 | if (hna_global_init(bat_priv) < 1) | 106 | if (tt_global_init(bat_priv) < 1) |
103 | goto err; | 107 | goto err; |
104 | 108 | ||
105 | hna_local_add(soft_iface, soft_iface->dev_addr); | 109 | tt_local_add(soft_iface, soft_iface->dev_addr); |
106 | 110 | ||
107 | if (vis_init(bat_priv) < 1) | 111 | if (vis_init(bat_priv) < 1) |
108 | goto err; | 112 | goto err; |
@@ -133,8 +137,8 @@ void mesh_free(struct net_device *soft_iface) | |||
133 | gw_node_purge(bat_priv); | 137 | gw_node_purge(bat_priv); |
134 | originator_free(bat_priv); | 138 | originator_free(bat_priv); |
135 | 139 | ||
136 | hna_local_free(bat_priv); | 140 | tt_local_free(bat_priv); |
137 | hna_global_free(bat_priv); | 141 | tt_global_free(bat_priv); |
138 | 142 | ||
139 | softif_neigh_purge(bat_priv); | 143 | softif_neigh_purge(bat_priv); |
140 | 144 | ||
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index ace72852ed7b..148b49e02642 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -34,16 +34,18 @@ | |||
34 | 34 | ||
35 | #define TQ_MAX_VALUE 255 | 35 | #define TQ_MAX_VALUE 255 |
36 | #define JITTER 20 | 36 | #define JITTER 20 |
37 | #define TTL 50 /* Time To Live of broadcast messages */ | ||
38 | 37 | ||
39 | #define PURGE_TIMEOUT 200 /* purge originators after time in seconds if no | 38 | /* Time To Live of broadcast messages */ |
40 | * valid packet comes in -> TODO: check | 39 | #define TTL 50 |
41 | * influence on TQ_LOCAL_WINDOW_SIZE */ | ||
42 | #define LOCAL_HNA_TIMEOUT 3600 /* in seconds */ | ||
43 | 40 | ||
44 | #define TQ_LOCAL_WINDOW_SIZE 64 /* sliding packet range of received originator | 41 | /* purge originators after time in seconds if no valid packet comes in |
45 | * messages in squence numbers (should be a | 42 | * -> TODO: check influence on TQ_LOCAL_WINDOW_SIZE */ |
46 | * multiple of our word size) */ | 43 | #define PURGE_TIMEOUT 200 |
44 | #define TT_LOCAL_TIMEOUT 3600 /* in seconds */ | ||
45 | |||
46 | /* sliding packet range of received originator messages in squence numbers | ||
47 | * (should be a multiple of our word size) */ | ||
48 | #define TQ_LOCAL_WINDOW_SIZE 64 | ||
47 | #define TQ_GLOBAL_WINDOW_SIZE 5 | 49 | #define TQ_GLOBAL_WINDOW_SIZE 5 |
48 | #define TQ_LOCAL_BIDRECT_SEND_MINIMUM 1 | 50 | #define TQ_LOCAL_BIDRECT_SEND_MINIMUM 1 |
49 | #define TQ_LOCAL_BIDRECT_RECV_MINIMUM 1 | 51 | #define TQ_LOCAL_BIDRECT_RECV_MINIMUM 1 |
@@ -55,21 +57,20 @@ | |||
55 | 57 | ||
56 | #define VIS_INTERVAL 5000 /* 5 seconds */ | 58 | #define VIS_INTERVAL 5000 /* 5 seconds */ |
57 | 59 | ||
58 | /* how much worse secondary interfaces may be to | 60 | /* how much worse secondary interfaces may be to be considered as bonding |
59 | * to be considered as bonding candidates */ | 61 | * candidates */ |
60 | |||
61 | #define BONDING_TQ_THRESHOLD 50 | 62 | #define BONDING_TQ_THRESHOLD 50 |
62 | 63 | ||
63 | #define MAX_AGGREGATION_BYTES 512 /* should not be bigger than 512 bytes or | 64 | /* should not be bigger than 512 bytes or change the size of |
64 | * change the size of | 65 | * forw_packet->direct_link_flags */ |
65 | * forw_packet->direct_link_flags */ | 66 | #define MAX_AGGREGATION_BYTES 512 |
66 | #define MAX_AGGREGATION_MS 100 | 67 | #define MAX_AGGREGATION_MS 100 |
67 | 68 | ||
68 | #define SOFTIF_NEIGH_TIMEOUT 180000 /* 3 minutes */ | 69 | #define SOFTIF_NEIGH_TIMEOUT 180000 /* 3 minutes */ |
69 | 70 | ||
71 | /* don't reset again within 30 seconds */ | ||
70 | #define RESET_PROTECTION_MS 30000 | 72 | #define RESET_PROTECTION_MS 30000 |
71 | #define EXPECTED_SEQNO_RANGE 65536 | 73 | #define EXPECTED_SEQNO_RANGE 65536 |
72 | /* don't reset again within 30 seconds */ | ||
73 | 74 | ||
74 | #define MESH_INACTIVE 0 | 75 | #define MESH_INACTIVE 0 |
75 | #define MESH_ACTIVE 1 | 76 | #define MESH_ACTIVE 1 |
@@ -84,12 +85,13 @@ | |||
84 | #ifdef pr_fmt | 85 | #ifdef pr_fmt |
85 | #undef pr_fmt | 86 | #undef pr_fmt |
86 | #endif | 87 | #endif |
87 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* Append 'batman-adv: ' before | 88 | /* Append 'batman-adv: ' before kernel messages */ |
88 | * kernel messages */ | 89 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
89 | 90 | ||
90 | #define DBG_BATMAN 1 /* all messages related to routing / flooding / | 91 | /* all messages related to routing / flooding / broadcasting / etc */ |
91 | * broadcasting / etc */ | 92 | #define DBG_BATMAN 1 |
92 | #define DBG_ROUTES 2 /* route or hna added / changed / deleted */ | 93 | /* route or tt entry added / changed / deleted */ |
94 | #define DBG_ROUTES 2 | ||
93 | #define DBG_ALL 3 | 95 | #define DBG_ALL 3 |
94 | 96 | ||
95 | 97 | ||
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index ef4a9be7613a..080ec88330a3 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -19,8 +19,6 @@ | |||
19 | * | 19 | * |
20 | */ | 20 | */ |
21 | 21 | ||
22 | /* increase the reference counter for this originator */ | ||
23 | |||
24 | #include "main.h" | 22 | #include "main.h" |
25 | #include "originator.h" | 23 | #include "originator.h" |
26 | #include "hash.h" | 24 | #include "hash.h" |
@@ -144,7 +142,7 @@ static void orig_node_free_rcu(struct rcu_head *rcu) | |||
144 | spin_unlock_bh(&orig_node->neigh_list_lock); | 142 | spin_unlock_bh(&orig_node->neigh_list_lock); |
145 | 143 | ||
146 | frag_list_free(&orig_node->frag_list); | 144 | frag_list_free(&orig_node->frag_list); |
147 | hna_global_del_orig(orig_node->bat_priv, orig_node, | 145 | tt_global_del_orig(orig_node->bat_priv, orig_node, |
148 | "originator timed out"); | 146 | "originator timed out"); |
149 | 147 | ||
150 | kfree(orig_node->bcast_own); | 148 | kfree(orig_node->bcast_own); |
@@ -222,7 +220,7 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr) | |||
222 | orig_node->bat_priv = bat_priv; | 220 | orig_node->bat_priv = bat_priv; |
223 | memcpy(orig_node->orig, addr, ETH_ALEN); | 221 | memcpy(orig_node->orig, addr, ETH_ALEN); |
224 | orig_node->router = NULL; | 222 | orig_node->router = NULL; |
225 | orig_node->hna_buff = NULL; | 223 | orig_node->tt_buff = NULL; |
226 | orig_node->bcast_seqno_reset = jiffies - 1 | 224 | orig_node->bcast_seqno_reset = jiffies - 1 |
227 | - msecs_to_jiffies(RESET_PROTECTION_MS); | 225 | - msecs_to_jiffies(RESET_PROTECTION_MS); |
228 | orig_node->batman_seqno_reset = jiffies - 1 | 226 | orig_node->batman_seqno_reset = jiffies - 1 |
@@ -333,8 +331,8 @@ static bool purge_orig_node(struct bat_priv *bat_priv, | |||
333 | &best_neigh_node)) { | 331 | &best_neigh_node)) { |
334 | update_routes(bat_priv, orig_node, | 332 | update_routes(bat_priv, orig_node, |
335 | best_neigh_node, | 333 | best_neigh_node, |
336 | orig_node->hna_buff, | 334 | orig_node->tt_buff, |
337 | orig_node->hna_buff_len); | 335 | orig_node->tt_buff_len); |
338 | } | 336 | } |
339 | } | 337 | } |
340 | 338 | ||
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index e7571879af3f..eda99650e9f8 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -61,7 +61,7 @@ struct batman_packet { | |||
61 | uint8_t orig[6]; | 61 | uint8_t orig[6]; |
62 | uint8_t prev_sender[6]; | 62 | uint8_t prev_sender[6]; |
63 | uint8_t ttl; | 63 | uint8_t ttl; |
64 | uint8_t num_hna; | 64 | uint8_t num_tt; |
65 | uint8_t gw_flags; /* flags related to gateway class */ | 65 | uint8_t gw_flags; /* flags related to gateway class */ |
66 | uint8_t align; | 66 | uint8_t align; |
67 | } __packed; | 67 | } __packed; |
@@ -128,8 +128,7 @@ struct vis_packet { | |||
128 | uint8_t entries; /* number of entries behind this struct */ | 128 | uint8_t entries; /* number of entries behind this struct */ |
129 | uint32_t seqno; /* sequence number */ | 129 | uint32_t seqno; /* sequence number */ |
130 | uint8_t ttl; /* TTL */ | 130 | uint8_t ttl; /* TTL */ |
131 | uint8_t vis_orig[6]; /* originator that informs about its | 131 | uint8_t vis_orig[6]; /* originator that announces its neighbors */ |
132 | * neighbors */ | ||
133 | uint8_t target_orig[6]; /* who should receive this packet */ | 132 | uint8_t target_orig[6]; /* who should receive this packet */ |
134 | uint8_t sender_orig[6]; /* who sent or rebroadcasted this packet */ | 133 | uint8_t sender_orig[6]; /* who sent or rebroadcasted this packet */ |
135 | } __packed; | 134 | } __packed; |
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 49f571553050..bb1c3ec7e3ff 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -64,28 +64,28 @@ void slide_own_bcast_window(struct hard_iface *hard_iface) | |||
64 | } | 64 | } |
65 | } | 65 | } |
66 | 66 | ||
67 | static void update_HNA(struct bat_priv *bat_priv, struct orig_node *orig_node, | 67 | static void update_TT(struct bat_priv *bat_priv, struct orig_node *orig_node, |
68 | unsigned char *hna_buff, int hna_buff_len) | 68 | unsigned char *tt_buff, int tt_buff_len) |
69 | { | 69 | { |
70 | if ((hna_buff_len != orig_node->hna_buff_len) || | 70 | if ((tt_buff_len != orig_node->tt_buff_len) || |
71 | ((hna_buff_len > 0) && | 71 | ((tt_buff_len > 0) && |
72 | (orig_node->hna_buff_len > 0) && | 72 | (orig_node->tt_buff_len > 0) && |
73 | (memcmp(orig_node->hna_buff, hna_buff, hna_buff_len) != 0))) { | 73 | (memcmp(orig_node->tt_buff, tt_buff, tt_buff_len) != 0))) { |
74 | 74 | ||
75 | if (orig_node->hna_buff_len > 0) | 75 | if (orig_node->tt_buff_len > 0) |
76 | hna_global_del_orig(bat_priv, orig_node, | 76 | tt_global_del_orig(bat_priv, orig_node, |
77 | "originator changed hna"); | 77 | "originator changed tt"); |
78 | 78 | ||
79 | if ((hna_buff_len > 0) && (hna_buff)) | 79 | if ((tt_buff_len > 0) && (tt_buff)) |
80 | hna_global_add_orig(bat_priv, orig_node, | 80 | tt_global_add_orig(bat_priv, orig_node, |
81 | hna_buff, hna_buff_len); | 81 | tt_buff, tt_buff_len); |
82 | } | 82 | } |
83 | } | 83 | } |
84 | 84 | ||
85 | static void update_route(struct bat_priv *bat_priv, | 85 | static void update_route(struct bat_priv *bat_priv, |
86 | struct orig_node *orig_node, | 86 | struct orig_node *orig_node, |
87 | struct neigh_node *neigh_node, | 87 | struct neigh_node *neigh_node, |
88 | unsigned char *hna_buff, int hna_buff_len) | 88 | unsigned char *tt_buff, int tt_buff_len) |
89 | { | 89 | { |
90 | struct neigh_node *curr_router; | 90 | struct neigh_node *curr_router; |
91 | 91 | ||
@@ -96,7 +96,7 @@ static void update_route(struct bat_priv *bat_priv, | |||
96 | 96 | ||
97 | bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n", | 97 | bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n", |
98 | orig_node->orig); | 98 | orig_node->orig); |
99 | hna_global_del_orig(bat_priv, orig_node, | 99 | tt_global_del_orig(bat_priv, orig_node, |
100 | "originator timed out"); | 100 | "originator timed out"); |
101 | 101 | ||
102 | /* route added */ | 102 | /* route added */ |
@@ -105,8 +105,8 @@ static void update_route(struct bat_priv *bat_priv, | |||
105 | bat_dbg(DBG_ROUTES, bat_priv, | 105 | bat_dbg(DBG_ROUTES, bat_priv, |
106 | "Adding route towards: %pM (via %pM)\n", | 106 | "Adding route towards: %pM (via %pM)\n", |
107 | orig_node->orig, neigh_node->addr); | 107 | orig_node->orig, neigh_node->addr); |
108 | hna_global_add_orig(bat_priv, orig_node, | 108 | tt_global_add_orig(bat_priv, orig_node, |
109 | hna_buff, hna_buff_len); | 109 | tt_buff, tt_buff_len); |
110 | 110 | ||
111 | /* route changed */ | 111 | /* route changed */ |
112 | } else { | 112 | } else { |
@@ -135,8 +135,8 @@ static void update_route(struct bat_priv *bat_priv, | |||
135 | 135 | ||
136 | 136 | ||
137 | void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node, | 137 | void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node, |
138 | struct neigh_node *neigh_node, unsigned char *hna_buff, | 138 | struct neigh_node *neigh_node, unsigned char *tt_buff, |
139 | int hna_buff_len) | 139 | int tt_buff_len) |
140 | { | 140 | { |
141 | struct neigh_node *router = NULL; | 141 | struct neigh_node *router = NULL; |
142 | 142 | ||
@@ -147,10 +147,10 @@ void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node, | |||
147 | 147 | ||
148 | if (router != neigh_node) | 148 | if (router != neigh_node) |
149 | update_route(bat_priv, orig_node, neigh_node, | 149 | update_route(bat_priv, orig_node, neigh_node, |
150 | hna_buff, hna_buff_len); | 150 | tt_buff, tt_buff_len); |
151 | /* may be just HNA changed */ | 151 | /* may be just TT changed */ |
152 | else | 152 | else |
153 | update_HNA(bat_priv, orig_node, hna_buff, hna_buff_len); | 153 | update_TT(bat_priv, orig_node, tt_buff, tt_buff_len); |
154 | 154 | ||
155 | out: | 155 | out: |
156 | if (router) | 156 | if (router) |
@@ -169,65 +169,41 @@ static int is_bidirectional_neigh(struct orig_node *orig_node, | |||
169 | uint8_t orig_eq_count, neigh_rq_count, tq_own; | 169 | uint8_t orig_eq_count, neigh_rq_count, tq_own; |
170 | int tq_asym_penalty, ret = 0; | 170 | int tq_asym_penalty, ret = 0; |
171 | 171 | ||
172 | if (orig_node == orig_neigh_node) { | 172 | /* find corresponding one hop neighbor */ |
173 | rcu_read_lock(); | 173 | rcu_read_lock(); |
174 | hlist_for_each_entry_rcu(tmp_neigh_node, node, | 174 | hlist_for_each_entry_rcu(tmp_neigh_node, node, |
175 | &orig_node->neigh_list, list) { | 175 | &orig_neigh_node->neigh_list, list) { |
176 | |||
177 | if (!compare_eth(tmp_neigh_node->addr, | ||
178 | orig_neigh_node->orig)) | ||
179 | continue; | ||
180 | |||
181 | if (tmp_neigh_node->if_incoming != if_incoming) | ||
182 | continue; | ||
183 | |||
184 | if (!atomic_inc_not_zero(&tmp_neigh_node->refcount)) | ||
185 | continue; | ||
186 | |||
187 | neigh_node = tmp_neigh_node; | ||
188 | } | ||
189 | rcu_read_unlock(); | ||
190 | 176 | ||
191 | if (!neigh_node) | 177 | if (!compare_eth(tmp_neigh_node->addr, orig_neigh_node->orig)) |
192 | neigh_node = create_neighbor(orig_node, | 178 | continue; |
193 | orig_neigh_node, | ||
194 | orig_neigh_node->orig, | ||
195 | if_incoming); | ||
196 | if (!neigh_node) | ||
197 | goto out; | ||
198 | 179 | ||
199 | neigh_node->last_valid = jiffies; | 180 | if (tmp_neigh_node->if_incoming != if_incoming) |
200 | } else { | 181 | continue; |
201 | /* find packet count of corresponding one hop neighbor */ | ||
202 | rcu_read_lock(); | ||
203 | hlist_for_each_entry_rcu(tmp_neigh_node, node, | ||
204 | &orig_neigh_node->neigh_list, list) { | ||
205 | 182 | ||
206 | if (!compare_eth(tmp_neigh_node->addr, | 183 | if (!atomic_inc_not_zero(&tmp_neigh_node->refcount)) |
207 | orig_neigh_node->orig)) | 184 | continue; |
208 | continue; | ||
209 | 185 | ||
210 | if (tmp_neigh_node->if_incoming != if_incoming) | 186 | neigh_node = tmp_neigh_node; |
211 | continue; | 187 | break; |
188 | } | ||
189 | rcu_read_unlock(); | ||
212 | 190 | ||
213 | if (!atomic_inc_not_zero(&tmp_neigh_node->refcount)) | 191 | if (!neigh_node) |
214 | continue; | 192 | neigh_node = create_neighbor(orig_neigh_node, |
193 | orig_neigh_node, | ||
194 | orig_neigh_node->orig, | ||
195 | if_incoming); | ||
215 | 196 | ||
216 | neigh_node = tmp_neigh_node; | 197 | if (!neigh_node) |
217 | } | 198 | goto out; |
218 | rcu_read_unlock(); | ||
219 | 199 | ||
220 | if (!neigh_node) | 200 | /* if orig_node is direct neighbour update neigh_node last_valid */ |
221 | neigh_node = create_neighbor(orig_neigh_node, | 201 | if (orig_node == orig_neigh_node) |
222 | orig_neigh_node, | 202 | neigh_node->last_valid = jiffies; |
223 | orig_neigh_node->orig, | ||
224 | if_incoming); | ||
225 | if (!neigh_node) | ||
226 | goto out; | ||
227 | } | ||
228 | 203 | ||
229 | orig_node->last_valid = jiffies; | 204 | orig_node->last_valid = jiffies; |
230 | 205 | ||
206 | /* find packet count of corresponding one hop neighbor */ | ||
231 | spin_lock_bh(&orig_node->ogm_cnt_lock); | 207 | spin_lock_bh(&orig_node->ogm_cnt_lock); |
232 | orig_eq_count = orig_neigh_node->bcast_own_sum[if_incoming->if_num]; | 208 | orig_eq_count = orig_neigh_node->bcast_own_sum[if_incoming->if_num]; |
233 | neigh_rq_count = neigh_node->real_packet_count; | 209 | neigh_rq_count = neigh_node->real_packet_count; |
@@ -387,14 +363,14 @@ static void update_orig(struct bat_priv *bat_priv, | |||
387 | struct ethhdr *ethhdr, | 363 | struct ethhdr *ethhdr, |
388 | struct batman_packet *batman_packet, | 364 | struct batman_packet *batman_packet, |
389 | struct hard_iface *if_incoming, | 365 | struct hard_iface *if_incoming, |
390 | unsigned char *hna_buff, int hna_buff_len, | 366 | unsigned char *tt_buff, int tt_buff_len, |
391 | char is_duplicate) | 367 | char is_duplicate) |
392 | { | 368 | { |
393 | struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL; | 369 | struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL; |
394 | struct neigh_node *router = NULL; | 370 | struct neigh_node *router = NULL; |
395 | struct orig_node *orig_node_tmp; | 371 | struct orig_node *orig_node_tmp; |
396 | struct hlist_node *node; | 372 | struct hlist_node *node; |
397 | int tmp_hna_buff_len; | 373 | int tmp_tt_buff_len; |
398 | uint8_t bcast_own_sum_orig, bcast_own_sum_neigh; | 374 | uint8_t bcast_own_sum_orig, bcast_own_sum_neigh; |
399 | 375 | ||
400 | bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): " | 376 | bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): " |
@@ -459,18 +435,18 @@ static void update_orig(struct bat_priv *bat_priv, | |||
459 | 435 | ||
460 | bonding_candidate_add(orig_node, neigh_node); | 436 | bonding_candidate_add(orig_node, neigh_node); |
461 | 437 | ||
462 | tmp_hna_buff_len = (hna_buff_len > batman_packet->num_hna * ETH_ALEN ? | 438 | tmp_tt_buff_len = (tt_buff_len > batman_packet->num_tt * ETH_ALEN ? |
463 | batman_packet->num_hna * ETH_ALEN : hna_buff_len); | 439 | batman_packet->num_tt * ETH_ALEN : tt_buff_len); |
464 | 440 | ||
465 | /* if this neighbor already is our next hop there is nothing | 441 | /* if this neighbor already is our next hop there is nothing |
466 | * to change */ | 442 | * to change */ |
467 | router = orig_node_get_router(orig_node); | 443 | router = orig_node_get_router(orig_node); |
468 | if (router == neigh_node) | 444 | if (router == neigh_node) |
469 | goto update_hna; | 445 | goto update_tt; |
470 | 446 | ||
471 | /* if this neighbor does not offer a better TQ we won't consider it */ | 447 | /* if this neighbor does not offer a better TQ we won't consider it */ |
472 | if (router && (router->tq_avg > neigh_node->tq_avg)) | 448 | if (router && (router->tq_avg > neigh_node->tq_avg)) |
473 | goto update_hna; | 449 | goto update_tt; |
474 | 450 | ||
475 | /* if the TQ is the same and the link not more symetric we | 451 | /* if the TQ is the same and the link not more symetric we |
476 | * won't consider it either */ | 452 | * won't consider it either */ |
@@ -488,16 +464,16 @@ static void update_orig(struct bat_priv *bat_priv, | |||
488 | spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock); | 464 | spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock); |
489 | 465 | ||
490 | if (bcast_own_sum_orig >= bcast_own_sum_neigh) | 466 | if (bcast_own_sum_orig >= bcast_own_sum_neigh) |
491 | goto update_hna; | 467 | goto update_tt; |
492 | } | 468 | } |
493 | 469 | ||
494 | update_routes(bat_priv, orig_node, neigh_node, | 470 | update_routes(bat_priv, orig_node, neigh_node, |
495 | hna_buff, tmp_hna_buff_len); | 471 | tt_buff, tmp_tt_buff_len); |
496 | goto update_gw; | 472 | goto update_gw; |
497 | 473 | ||
498 | update_hna: | 474 | update_tt: |
499 | update_routes(bat_priv, orig_node, router, | 475 | update_routes(bat_priv, orig_node, router, |
500 | hna_buff, tmp_hna_buff_len); | 476 | tt_buff, tmp_tt_buff_len); |
501 | 477 | ||
502 | update_gw: | 478 | update_gw: |
503 | if (orig_node->gw_flags != batman_packet->gw_flags) | 479 | if (orig_node->gw_flags != batman_packet->gw_flags) |
@@ -621,7 +597,7 @@ out: | |||
621 | 597 | ||
622 | void receive_bat_packet(struct ethhdr *ethhdr, | 598 | void receive_bat_packet(struct ethhdr *ethhdr, |
623 | struct batman_packet *batman_packet, | 599 | struct batman_packet *batman_packet, |
624 | unsigned char *hna_buff, int hna_buff_len, | 600 | unsigned char *tt_buff, int tt_buff_len, |
625 | struct hard_iface *if_incoming) | 601 | struct hard_iface *if_incoming) |
626 | { | 602 | { |
627 | struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); | 603 | struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); |
@@ -818,14 +794,14 @@ void receive_bat_packet(struct ethhdr *ethhdr, | |||
818 | ((orig_node->last_real_seqno == batman_packet->seqno) && | 794 | ((orig_node->last_real_seqno == batman_packet->seqno) && |
819 | (orig_node->last_ttl - 3 <= batman_packet->ttl)))) | 795 | (orig_node->last_ttl - 3 <= batman_packet->ttl)))) |
820 | update_orig(bat_priv, orig_node, ethhdr, batman_packet, | 796 | update_orig(bat_priv, orig_node, ethhdr, batman_packet, |
821 | if_incoming, hna_buff, hna_buff_len, is_duplicate); | 797 | if_incoming, tt_buff, tt_buff_len, is_duplicate); |
822 | 798 | ||
823 | /* is single hop (direct) neighbor */ | 799 | /* is single hop (direct) neighbor */ |
824 | if (is_single_hop_neigh) { | 800 | if (is_single_hop_neigh) { |
825 | 801 | ||
826 | /* mark direct link on incoming interface */ | 802 | /* mark direct link on incoming interface */ |
827 | schedule_forward_packet(orig_node, ethhdr, batman_packet, | 803 | schedule_forward_packet(orig_node, ethhdr, batman_packet, |
828 | 1, hna_buff_len, if_incoming); | 804 | 1, tt_buff_len, if_incoming); |
829 | 805 | ||
830 | bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: " | 806 | bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: " |
831 | "rebroadcast neighbor packet with direct link flag\n"); | 807 | "rebroadcast neighbor packet with direct link flag\n"); |
@@ -848,7 +824,7 @@ void receive_bat_packet(struct ethhdr *ethhdr, | |||
848 | bat_dbg(DBG_BATMAN, bat_priv, | 824 | bat_dbg(DBG_BATMAN, bat_priv, |
849 | "Forwarding packet: rebroadcast originator packet\n"); | 825 | "Forwarding packet: rebroadcast originator packet\n"); |
850 | schedule_forward_packet(orig_node, ethhdr, batman_packet, | 826 | schedule_forward_packet(orig_node, ethhdr, batman_packet, |
851 | 0, hna_buff_len, if_incoming); | 827 | 0, tt_buff_len, if_incoming); |
852 | 828 | ||
853 | out_neigh: | 829 | out_neigh: |
854 | if ((orig_neigh_node) && (!is_single_hop_neigh)) | 830 | if ((orig_neigh_node) && (!is_single_hop_neigh)) |
@@ -1213,7 +1189,7 @@ struct neigh_node *find_router(struct bat_priv *bat_priv, | |||
1213 | 1189 | ||
1214 | router = orig_node_get_router(orig_node); | 1190 | router = orig_node_get_router(orig_node); |
1215 | if (!router) | 1191 | if (!router) |
1216 | return NULL; | 1192 | goto err; |
1217 | 1193 | ||
1218 | /* without bonding, the first node should | 1194 | /* without bonding, the first node should |
1219 | * always choose the default router. */ | 1195 | * always choose the default router. */ |
@@ -1222,10 +1198,8 @@ struct neigh_node *find_router(struct bat_priv *bat_priv, | |||
1222 | rcu_read_lock(); | 1198 | rcu_read_lock(); |
1223 | /* select default router to output */ | 1199 | /* select default router to output */ |
1224 | router_orig = router->orig_node; | 1200 | router_orig = router->orig_node; |
1225 | if (!router_orig) { | 1201 | if (!router_orig) |
1226 | rcu_read_unlock(); | 1202 | goto err_unlock; |
1227 | return NULL; | ||
1228 | } | ||
1229 | 1203 | ||
1230 | if ((!recv_if) && (!bonding_enabled)) | 1204 | if ((!recv_if) && (!bonding_enabled)) |
1231 | goto return_router; | 1205 | goto return_router; |
@@ -1268,6 +1242,12 @@ struct neigh_node *find_router(struct bat_priv *bat_priv, | |||
1268 | return_router: | 1242 | return_router: |
1269 | rcu_read_unlock(); | 1243 | rcu_read_unlock(); |
1270 | return router; | 1244 | return router; |
1245 | err_unlock: | ||
1246 | rcu_read_unlock(); | ||
1247 | err: | ||
1248 | if (router) | ||
1249 | neigh_node_free_ref(router); | ||
1250 | return NULL; | ||
1271 | } | 1251 | } |
1272 | 1252 | ||
1273 | static int check_unicast_packet(struct sk_buff *skb, int hdr_size) | 1253 | static int check_unicast_packet(struct sk_buff *skb, int hdr_size) |
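One behavioural detail worth noting in the find_router() hunk above: the router reference taken at the top of the function was not released on the old early "if (!router_orig)" exit; the new err_unlock/err labels drop the RCU read lock and release that reference with neigh_node_free_ref() before returning NULL.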
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h
index b5a064c88a4f..870f29842b28 100644
--- a/net/batman-adv/routing.h
+++ b/net/batman-adv/routing.h
@@ -25,11 +25,11 @@ | |||
25 | void slide_own_bcast_window(struct hard_iface *hard_iface); | 25 | void slide_own_bcast_window(struct hard_iface *hard_iface); |
26 | void receive_bat_packet(struct ethhdr *ethhdr, | 26 | void receive_bat_packet(struct ethhdr *ethhdr, |
27 | struct batman_packet *batman_packet, | 27 | struct batman_packet *batman_packet, |
28 | unsigned char *hna_buff, int hna_buff_len, | 28 | unsigned char *tt_buff, int tt_buff_len, |
29 | struct hard_iface *if_incoming); | 29 | struct hard_iface *if_incoming); |
30 | void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node, | 30 | void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node, |
31 | struct neigh_node *neigh_node, unsigned char *hna_buff, | 31 | struct neigh_node *neigh_node, unsigned char *tt_buff, |
32 | int hna_buff_len); | 32 | int tt_buff_len); |
33 | int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if); | 33 | int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if); |
34 | int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if); | 34 | int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if); |
35 | int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if); | 35 | int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if); |
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 02b541a6dfef..f30d0c69ccbb 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -121,7 +121,7 @@ static void send_packet_to_if(struct forw_packet *forw_packet, | |||
121 | /* adjust all flags and log packets */ | 121 | /* adjust all flags and log packets */ |
122 | while (aggregated_packet(buff_pos, | 122 | while (aggregated_packet(buff_pos, |
123 | forw_packet->packet_len, | 123 | forw_packet->packet_len, |
124 | batman_packet->num_hna)) { | 124 | batman_packet->num_tt)) { |
125 | 125 | ||
126 | /* we might have aggregated direct link packets with an | 126 | /* we might have aggregated direct link packets with an |
127 | * ordinary base packet */ | 127 | * ordinary base packet */ |
@@ -146,7 +146,7 @@ static void send_packet_to_if(struct forw_packet *forw_packet, | |||
146 | hard_iface->net_dev->dev_addr); | 146 | hard_iface->net_dev->dev_addr); |
147 | 147 | ||
148 | buff_pos += sizeof(struct batman_packet) + | 148 | buff_pos += sizeof(struct batman_packet) + |
149 | (batman_packet->num_hna * ETH_ALEN); | 149 | (batman_packet->num_tt * ETH_ALEN); |
150 | packet_num++; | 150 | packet_num++; |
151 | batman_packet = (struct batman_packet *) | 151 | batman_packet = (struct batman_packet *) |
152 | (forw_packet->skb->data + buff_pos); | 152 | (forw_packet->skb->data + buff_pos); |
@@ -222,7 +222,7 @@ static void rebuild_batman_packet(struct bat_priv *bat_priv, | |||
222 | struct batman_packet *batman_packet; | 222 | struct batman_packet *batman_packet; |
223 | 223 | ||
224 | new_len = sizeof(struct batman_packet) + | 224 | new_len = sizeof(struct batman_packet) + |
225 | (bat_priv->num_local_hna * ETH_ALEN); | 225 | (bat_priv->num_local_tt * ETH_ALEN); |
226 | new_buff = kmalloc(new_len, GFP_ATOMIC); | 226 | new_buff = kmalloc(new_len, GFP_ATOMIC); |
227 | 227 | ||
228 | /* keep old buffer if kmalloc should fail */ | 228 | /* keep old buffer if kmalloc should fail */ |
@@ -231,7 +231,7 @@ static void rebuild_batman_packet(struct bat_priv *bat_priv, | |||
231 | sizeof(struct batman_packet)); | 231 | sizeof(struct batman_packet)); |
232 | batman_packet = (struct batman_packet *)new_buff; | 232 | batman_packet = (struct batman_packet *)new_buff; |
233 | 233 | ||
234 | batman_packet->num_hna = hna_local_fill_buffer(bat_priv, | 234 | batman_packet->num_tt = tt_local_fill_buffer(bat_priv, |
235 | new_buff + sizeof(struct batman_packet), | 235 | new_buff + sizeof(struct batman_packet), |
236 | new_len - sizeof(struct batman_packet)); | 236 | new_len - sizeof(struct batman_packet)); |
237 | 237 | ||
@@ -266,8 +266,8 @@ void schedule_own_packet(struct hard_iface *hard_iface) | |||
266 | if (hard_iface->if_status == IF_TO_BE_ACTIVATED) | 266 | if (hard_iface->if_status == IF_TO_BE_ACTIVATED) |
267 | hard_iface->if_status = IF_ACTIVE; | 267 | hard_iface->if_status = IF_ACTIVE; |
268 | 268 | ||
269 | /* if local hna has changed and interface is a primary interface */ | 269 | /* if local tt has changed and interface is a primary interface */ |
270 | if ((atomic_read(&bat_priv->hna_local_changed)) && | 270 | if ((atomic_read(&bat_priv->tt_local_changed)) && |
271 | (hard_iface == primary_if)) | 271 | (hard_iface == primary_if)) |
272 | rebuild_batman_packet(bat_priv, hard_iface); | 272 | rebuild_batman_packet(bat_priv, hard_iface); |
273 | 273 | ||
@@ -309,7 +309,7 @@ void schedule_own_packet(struct hard_iface *hard_iface) | |||
309 | void schedule_forward_packet(struct orig_node *orig_node, | 309 | void schedule_forward_packet(struct orig_node *orig_node, |
310 | struct ethhdr *ethhdr, | 310 | struct ethhdr *ethhdr, |
311 | struct batman_packet *batman_packet, | 311 | struct batman_packet *batman_packet, |
312 | uint8_t directlink, int hna_buff_len, | 312 | uint8_t directlink, int tt_buff_len, |
313 | struct hard_iface *if_incoming) | 313 | struct hard_iface *if_incoming) |
314 | { | 314 | { |
315 | struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); | 315 | struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); |
@@ -369,7 +369,7 @@ void schedule_forward_packet(struct orig_node *orig_node, | |||
369 | send_time = forward_send_time(); | 369 | send_time = forward_send_time(); |
370 | add_bat_packet_to_list(bat_priv, | 370 | add_bat_packet_to_list(bat_priv, |
371 | (unsigned char *)batman_packet, | 371 | (unsigned char *)batman_packet, |
372 | sizeof(struct batman_packet) + hna_buff_len, | 372 | sizeof(struct batman_packet) + tt_buff_len, |
373 | if_incoming, 0, send_time); | 373 | if_incoming, 0, send_time); |
374 | } | 374 | } |
375 | 375 | ||
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h
index 7b2ff19c05e7..247172d71e4b 100644
--- a/net/batman-adv/send.h
+++ b/net/batman-adv/send.h
@@ -29,7 +29,7 @@ void schedule_own_packet(struct hard_iface *hard_iface); | |||
29 | void schedule_forward_packet(struct orig_node *orig_node, | 29 | void schedule_forward_packet(struct orig_node *orig_node, |
30 | struct ethhdr *ethhdr, | 30 | struct ethhdr *ethhdr, |
31 | struct batman_packet *batman_packet, | 31 | struct batman_packet *batman_packet, |
32 | uint8_t directlink, int hna_buff_len, | 32 | uint8_t directlink, int tt_buff_len, |
33 | struct hard_iface *if_outgoing); | 33 | struct hard_iface *if_outgoing); |
34 | int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb); | 34 | int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb); |
35 | void send_outstanding_bat_packet(struct work_struct *work); | 35 | void send_outstanding_bat_packet(struct work_struct *work); |
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 9e5fcd1596cf..c76a33eeb3f1 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -86,135 +86,251 @@ static void softif_neigh_free_ref(struct softif_neigh *softif_neigh) | |||
86 | call_rcu(&softif_neigh->rcu, softif_neigh_free_rcu); | 86 | call_rcu(&softif_neigh->rcu, softif_neigh_free_rcu); |
87 | } | 87 | } |
88 | 88 | ||
89 | static struct softif_neigh *softif_neigh_get_selected(struct bat_priv *bat_priv) | 89 | static void softif_neigh_vid_free_rcu(struct rcu_head *rcu) |
90 | { | 90 | { |
91 | struct softif_neigh *neigh; | 91 | struct softif_neigh_vid *softif_neigh_vid; |
92 | 92 | struct softif_neigh *softif_neigh; | |
93 | rcu_read_lock(); | 93 | struct hlist_node *node, *node_tmp; |
94 | neigh = rcu_dereference(bat_priv->softif_neigh); | 94 | struct bat_priv *bat_priv; |
95 | |||
96 | if (neigh && !atomic_inc_not_zero(&neigh->refcount)) | ||
97 | neigh = NULL; | ||
98 | |||
99 | rcu_read_unlock(); | ||
100 | return neigh; | ||
101 | } | ||
102 | 95 | ||
103 | static void softif_neigh_select(struct bat_priv *bat_priv, | 96 | softif_neigh_vid = container_of(rcu, struct softif_neigh_vid, rcu); |
104 | struct softif_neigh *new_neigh) | 97 | bat_priv = softif_neigh_vid->bat_priv; |
105 | { | ||
106 | struct softif_neigh *curr_neigh; | ||
107 | 98 | ||
108 | spin_lock_bh(&bat_priv->softif_neigh_lock); | 99 | spin_lock_bh(&bat_priv->softif_neigh_lock); |
109 | 100 | hlist_for_each_entry_safe(softif_neigh, node, node_tmp, | |
110 | if (new_neigh && !atomic_inc_not_zero(&new_neigh->refcount)) | 101 | &softif_neigh_vid->softif_neigh_list, list) { |
111 | new_neigh = NULL; | 102 | hlist_del_rcu(&softif_neigh->list); |
112 | 103 | softif_neigh_free_ref(softif_neigh); | |
113 | curr_neigh = bat_priv->softif_neigh; | 104 | } |
114 | rcu_assign_pointer(bat_priv->softif_neigh, new_neigh); | ||
115 | |||
116 | if (curr_neigh) | ||
117 | softif_neigh_free_ref(curr_neigh); | ||
118 | |||
119 | spin_unlock_bh(&bat_priv->softif_neigh_lock); | 105 | spin_unlock_bh(&bat_priv->softif_neigh_lock); |
106 | |||
107 | kfree(softif_neigh_vid); | ||
120 | } | 108 | } |
121 | 109 | ||
122 | static void softif_neigh_deselect(struct bat_priv *bat_priv) | 110 | static void softif_neigh_vid_free_ref(struct softif_neigh_vid *softif_neigh_vid) |
123 | { | 111 | { |
124 | softif_neigh_select(bat_priv, NULL); | 112 | if (atomic_dec_and_test(&softif_neigh_vid->refcount)) |
113 | call_rcu(&softif_neigh_vid->rcu, softif_neigh_vid_free_rcu); | ||
125 | } | 114 | } |
126 | 115 | ||
127 | void softif_neigh_purge(struct bat_priv *bat_priv) | 116 | static struct softif_neigh_vid *softif_neigh_vid_get(struct bat_priv *bat_priv, |
117 | short vid) | ||
128 | { | 118 | { |
129 | struct softif_neigh *softif_neigh, *curr_softif_neigh; | 119 | struct softif_neigh_vid *softif_neigh_vid; |
130 | struct hlist_node *node, *node_tmp; | 120 | struct hlist_node *node; |
131 | char do_deselect = 0; | ||
132 | |||
133 | curr_softif_neigh = softif_neigh_get_selected(bat_priv); | ||
134 | |||
135 | spin_lock_bh(&bat_priv->softif_neigh_lock); | ||
136 | |||
137 | hlist_for_each_entry_safe(softif_neigh, node, node_tmp, | ||
138 | &bat_priv->softif_neigh_list, list) { | ||
139 | 121 | ||
140 | if ((!time_after(jiffies, softif_neigh->last_seen + | 122 | rcu_read_lock(); |
141 | msecs_to_jiffies(SOFTIF_NEIGH_TIMEOUT))) && | 123 | hlist_for_each_entry_rcu(softif_neigh_vid, node, |
142 | (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE)) | 124 | &bat_priv->softif_neigh_vids, list) { |
125 | if (softif_neigh_vid->vid != vid) | ||
143 | continue; | 126 | continue; |
144 | 127 | ||
145 | if (curr_softif_neigh == softif_neigh) { | 128 | if (!atomic_inc_not_zero(&softif_neigh_vid->refcount)) |
146 | bat_dbg(DBG_ROUTES, bat_priv, | 129 | continue; |
147 | "Current mesh exit point '%pM' vanished " | ||
148 | "(vid: %d).\n", | ||
149 | softif_neigh->addr, softif_neigh->vid); | ||
150 | do_deselect = 1; | ||
151 | } | ||
152 | 130 | ||
153 | hlist_del_rcu(&softif_neigh->list); | 131 | goto out; |
154 | softif_neigh_free_ref(softif_neigh); | ||
155 | } | 132 | } |
156 | 133 | ||
157 | spin_unlock_bh(&bat_priv->softif_neigh_lock); | 134 | softif_neigh_vid = kzalloc(sizeof(struct softif_neigh_vid), |
135 | GFP_ATOMIC); | ||
136 | if (!softif_neigh_vid) | ||
137 | goto out; | ||
158 | 138 | ||
159 | /* soft_neigh_deselect() needs to acquire the softif_neigh_lock */ | 139 | softif_neigh_vid->vid = vid; |
160 | if (do_deselect) | 140 | softif_neigh_vid->bat_priv = bat_priv; |
161 | softif_neigh_deselect(bat_priv); | ||
162 | 141 | ||
163 | if (curr_softif_neigh) | 142 | /* initialize with 2 - caller decrements counter by one */ |
164 | softif_neigh_free_ref(curr_softif_neigh); | 143 | atomic_set(&softif_neigh_vid->refcount, 2); |
144 | INIT_HLIST_HEAD(&softif_neigh_vid->softif_neigh_list); | ||
145 | INIT_HLIST_NODE(&softif_neigh_vid->list); | ||
146 | spin_lock_bh(&bat_priv->softif_neigh_vid_lock); | ||
147 | hlist_add_head_rcu(&softif_neigh_vid->list, | ||
148 | &bat_priv->softif_neigh_vids); | ||
149 | spin_unlock_bh(&bat_priv->softif_neigh_vid_lock); | ||
150 | |||
151 | out: | ||
152 | rcu_read_unlock(); | ||
153 | return softif_neigh_vid; | ||
165 | } | 154 | } |
166 | 155 | ||
167 | static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv, | 156 | static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv, |
168 | uint8_t *addr, short vid) | 157 | uint8_t *addr, short vid) |
169 | { | 158 | { |
170 | struct softif_neigh *softif_neigh; | 159 | struct softif_neigh_vid *softif_neigh_vid; |
160 | struct softif_neigh *softif_neigh = NULL; | ||
171 | struct hlist_node *node; | 161 | struct hlist_node *node; |
172 | 162 | ||
163 | softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid); | ||
164 | if (!softif_neigh_vid) | ||
165 | goto out; | ||
166 | |||
173 | rcu_read_lock(); | 167 | rcu_read_lock(); |
174 | hlist_for_each_entry_rcu(softif_neigh, node, | 168 | hlist_for_each_entry_rcu(softif_neigh, node, |
175 | &bat_priv->softif_neigh_list, list) { | 169 | &softif_neigh_vid->softif_neigh_list, |
170 | list) { | ||
176 | if (!compare_eth(softif_neigh->addr, addr)) | 171 | if (!compare_eth(softif_neigh->addr, addr)) |
177 | continue; | 172 | continue; |
178 | 173 | ||
179 | if (softif_neigh->vid != vid) | ||
180 | continue; | ||
181 | |||
182 | if (!atomic_inc_not_zero(&softif_neigh->refcount)) | 174 | if (!atomic_inc_not_zero(&softif_neigh->refcount)) |
183 | continue; | 175 | continue; |
184 | 176 | ||
185 | softif_neigh->last_seen = jiffies; | 177 | softif_neigh->last_seen = jiffies; |
186 | goto out; | 178 | goto unlock; |
187 | } | 179 | } |
188 | 180 | ||
189 | softif_neigh = kzalloc(sizeof(struct softif_neigh), GFP_ATOMIC); | 181 | softif_neigh = kzalloc(sizeof(struct softif_neigh), GFP_ATOMIC); |
190 | if (!softif_neigh) | 182 | if (!softif_neigh) |
191 | goto out; | 183 | goto unlock; |
192 | 184 | ||
193 | memcpy(softif_neigh->addr, addr, ETH_ALEN); | 185 | memcpy(softif_neigh->addr, addr, ETH_ALEN); |
194 | softif_neigh->vid = vid; | ||
195 | softif_neigh->last_seen = jiffies; | 186 | softif_neigh->last_seen = jiffies; |
196 | /* initialize with 2 - caller decrements counter by one */ | 187 | /* initialize with 2 - caller decrements counter by one */ |
197 | atomic_set(&softif_neigh->refcount, 2); | 188 | atomic_set(&softif_neigh->refcount, 2); |
198 | 189 | ||
199 | INIT_HLIST_NODE(&softif_neigh->list); | 190 | INIT_HLIST_NODE(&softif_neigh->list); |
200 | spin_lock_bh(&bat_priv->softif_neigh_lock); | 191 | spin_lock_bh(&bat_priv->softif_neigh_lock); |
201 | hlist_add_head_rcu(&softif_neigh->list, &bat_priv->softif_neigh_list); | 192 | hlist_add_head_rcu(&softif_neigh->list, |
193 | &softif_neigh_vid->softif_neigh_list); | ||
202 | spin_unlock_bh(&bat_priv->softif_neigh_lock); | 194 | spin_unlock_bh(&bat_priv->softif_neigh_lock); |
203 | 195 | ||
196 | unlock: | ||
197 | rcu_read_unlock(); | ||
204 | out: | 198 | out: |
199 | if (softif_neigh_vid) | ||
200 | softif_neigh_vid_free_ref(softif_neigh_vid); | ||
201 | return softif_neigh; | ||
202 | } | ||
203 | |||
204 | static struct softif_neigh *softif_neigh_get_selected( | ||
205 | struct softif_neigh_vid *softif_neigh_vid) | ||
206 | { | ||
207 | struct softif_neigh *softif_neigh; | ||
208 | |||
209 | rcu_read_lock(); | ||
210 | softif_neigh = rcu_dereference(softif_neigh_vid->softif_neigh); | ||
211 | |||
212 | if (softif_neigh && !atomic_inc_not_zero(&softif_neigh->refcount)) | ||
213 | softif_neigh = NULL; | ||
214 | |||
205 | rcu_read_unlock(); | 215 | rcu_read_unlock(); |
206 | return softif_neigh; | 216 | return softif_neigh; |
207 | } | 217 | } |
208 | 218 | ||
219 | static struct softif_neigh *softif_neigh_vid_get_selected( | ||
220 | struct bat_priv *bat_priv, | ||
221 | short vid) | ||
222 | { | ||
223 | struct softif_neigh_vid *softif_neigh_vid; | ||
224 | struct softif_neigh *softif_neigh = NULL; | ||
225 | |||
226 | softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid); | ||
227 | if (!softif_neigh_vid) | ||
228 | goto out; | ||
229 | |||
230 | softif_neigh = softif_neigh_get_selected(softif_neigh_vid); | ||
231 | out: | ||
232 | if (softif_neigh_vid) | ||
233 | softif_neigh_vid_free_ref(softif_neigh_vid); | ||
234 | return softif_neigh; | ||
235 | } | ||
236 | |||
237 | static void softif_neigh_vid_select(struct bat_priv *bat_priv, | ||
238 | struct softif_neigh *new_neigh, | ||
239 | short vid) | ||
240 | { | ||
241 | struct softif_neigh_vid *softif_neigh_vid; | ||
242 | struct softif_neigh *curr_neigh; | ||
243 | |||
244 | softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid); | ||
245 | if (!softif_neigh_vid) | ||
246 | goto out; | ||
247 | |||
248 | spin_lock_bh(&bat_priv->softif_neigh_lock); | ||
249 | |||
250 | if (new_neigh && !atomic_inc_not_zero(&new_neigh->refcount)) | ||
251 | new_neigh = NULL; | ||
252 | |||
253 | curr_neigh = softif_neigh_vid->softif_neigh; | ||
254 | rcu_assign_pointer(softif_neigh_vid->softif_neigh, new_neigh); | ||
255 | |||
256 | if ((curr_neigh) && (!new_neigh)) | ||
257 | bat_dbg(DBG_ROUTES, bat_priv, | ||
258 | "Removing mesh exit point on vid: %d (prev: %pM).\n", | ||
259 | vid, curr_neigh->addr); | ||
260 | else if ((curr_neigh) && (new_neigh)) | ||
261 | bat_dbg(DBG_ROUTES, bat_priv, | ||
262 | "Changing mesh exit point on vid: %d from %pM " | ||
263 | "to %pM.\n", vid, curr_neigh->addr, new_neigh->addr); | ||
264 | else if ((!curr_neigh) && (new_neigh)) | ||
265 | bat_dbg(DBG_ROUTES, bat_priv, | ||
266 | "Setting mesh exit point on vid: %d to %pM.\n", | ||
267 | vid, new_neigh->addr); | ||
268 | |||
269 | if (curr_neigh) | ||
270 | softif_neigh_free_ref(curr_neigh); | ||
271 | |||
272 | spin_unlock_bh(&bat_priv->softif_neigh_lock); | ||
273 | |||
274 | out: | ||
275 | if (softif_neigh_vid) | ||
276 | softif_neigh_vid_free_ref(softif_neigh_vid); | ||
277 | } | ||
278 | |||
279 | static void softif_neigh_vid_deselect(struct bat_priv *bat_priv, | ||
280 | struct softif_neigh_vid *softif_neigh_vid) | ||
281 | { | ||
282 | struct softif_neigh *curr_neigh; | ||
283 | struct softif_neigh *softif_neigh = NULL, *softif_neigh_tmp; | ||
284 | struct hard_iface *primary_if = NULL; | ||
285 | struct hlist_node *node; | ||
286 | |||
287 | primary_if = primary_if_get_selected(bat_priv); | ||
288 | if (!primary_if) | ||
289 | goto out; | ||
290 | |||
291 | /* find new softif_neigh immediately to avoid temporary loops */ | ||
292 | rcu_read_lock(); | ||
293 | curr_neigh = rcu_dereference(softif_neigh_vid->softif_neigh); | ||
294 | |||
295 | hlist_for_each_entry_rcu(softif_neigh_tmp, node, | ||
296 | &softif_neigh_vid->softif_neigh_list, | ||
297 | list) { | ||
298 | if (softif_neigh_tmp == curr_neigh) | ||
299 | continue; | ||
300 | |||
301 | /* we got a neighbor but its mac is 'bigger' than ours */ | ||
302 | if (memcmp(primary_if->net_dev->dev_addr, | ||
303 | softif_neigh_tmp->addr, ETH_ALEN) < 0) | ||
304 | continue; | ||
305 | |||
306 | if (!atomic_inc_not_zero(&softif_neigh_tmp->refcount)) | ||
307 | continue; | ||
308 | |||
309 | softif_neigh = softif_neigh_tmp; | ||
310 | goto unlock; | ||
311 | } | ||
312 | |||
313 | unlock: | ||
314 | rcu_read_unlock(); | ||
315 | out: | ||
316 | softif_neigh_vid_select(bat_priv, softif_neigh, softif_neigh_vid->vid); | ||
317 | |||
318 | if (primary_if) | ||
319 | hardif_free_ref(primary_if); | ||
320 | if (softif_neigh) | ||
321 | softif_neigh_free_ref(softif_neigh); | ||
322 | } | ||
323 | |||
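The election logic above (and in softif_batman_recv() further down) only ever hands the exit-point role to a neighbour whose MAC address is numerically smaller than the local one, so every node converges on the same choice without extra signalling. A small standalone illustration of that memcmp()-based comparison, with made-up addresses:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

int main(void)
{
	/* hypothetical addresses, purely for illustration */
	unsigned char ours[ETH_ALEN]   = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	unsigned char theirs[ETH_ALEN] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x01 };

	/* the byte-wise comparison over the six address bytes decides */
	if (memcmp(theirs, ours, ETH_ALEN) < 0)
		printf("neighbour has the smaller MAC: use it as exit point\n");
	else
		printf("keep the local bat interface as exit point\n");
	return 0;
}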
209 | int softif_neigh_seq_print_text(struct seq_file *seq, void *offset) | 324 | int softif_neigh_seq_print_text(struct seq_file *seq, void *offset) |
210 | { | 325 | { |
211 | struct net_device *net_dev = (struct net_device *)seq->private; | 326 | struct net_device *net_dev = (struct net_device *)seq->private; |
212 | struct bat_priv *bat_priv = netdev_priv(net_dev); | 327 | struct bat_priv *bat_priv = netdev_priv(net_dev); |
328 | struct softif_neigh_vid *softif_neigh_vid; | ||
213 | struct softif_neigh *softif_neigh; | 329 | struct softif_neigh *softif_neigh; |
214 | struct hard_iface *primary_if; | 330 | struct hard_iface *primary_if; |
215 | struct hlist_node *node; | 331 | struct hlist_node *node, *node_tmp; |
216 | struct softif_neigh *curr_softif_neigh; | 332 | struct softif_neigh *curr_softif_neigh; |
217 | int ret = 0; | 333 | int ret = 0, last_seen_secs, last_seen_msecs; |
218 | 334 | ||
219 | primary_if = primary_if_get_selected(bat_priv); | 335 | primary_if = primary_if_get_selected(bat_priv); |
220 | if (!primary_if) { | 336 | if (!primary_if) { |
@@ -233,17 +349,33 @@ int softif_neigh_seq_print_text(struct seq_file *seq, void *offset) | |||
233 | 349 | ||
234 | seq_printf(seq, "Softif neighbor list (%s)\n", net_dev->name); | 350 | seq_printf(seq, "Softif neighbor list (%s)\n", net_dev->name); |
235 | 351 | ||
236 | curr_softif_neigh = softif_neigh_get_selected(bat_priv); | ||
237 | rcu_read_lock(); | 352 | rcu_read_lock(); |
238 | hlist_for_each_entry_rcu(softif_neigh, node, | 353 | hlist_for_each_entry_rcu(softif_neigh_vid, node, |
239 | &bat_priv->softif_neigh_list, list) | 354 | &bat_priv->softif_neigh_vids, list) { |
240 | seq_printf(seq, "%s %pM (vid: %d)\n", | 355 | seq_printf(seq, " %-15s %s on vid: %d\n", |
241 | curr_softif_neigh == softif_neigh | 356 | "Originator", "last-seen", softif_neigh_vid->vid); |
242 | ? "=>" : " ", softif_neigh->addr, | 357 | |
243 | softif_neigh->vid); | 358 | curr_softif_neigh = softif_neigh_get_selected(softif_neigh_vid); |
359 | |||
360 | hlist_for_each_entry_rcu(softif_neigh, node_tmp, | ||
361 | &softif_neigh_vid->softif_neigh_list, | ||
362 | list) { | ||
363 | last_seen_secs = jiffies_to_msecs(jiffies - | ||
364 | softif_neigh->last_seen) / 1000; | ||
365 | last_seen_msecs = jiffies_to_msecs(jiffies - | ||
366 | softif_neigh->last_seen) % 1000; | ||
367 | seq_printf(seq, "%s %pM %3i.%03is\n", | ||
368 | curr_softif_neigh == softif_neigh | ||
369 | ? "=>" : " ", softif_neigh->addr, | ||
370 | last_seen_secs, last_seen_msecs); | ||
371 | } | ||
372 | |||
373 | if (curr_softif_neigh) | ||
374 | softif_neigh_free_ref(curr_softif_neigh); | ||
375 | |||
376 | seq_printf(seq, "\n"); | ||
377 | } | ||
244 | rcu_read_unlock(); | 378 | rcu_read_unlock(); |
245 | if (curr_softif_neigh) | ||
246 | softif_neigh_free_ref(curr_softif_neigh); | ||
247 | 379 | ||
248 | out: | 380 | out: |
249 | if (primary_if) | 381 | if (primary_if) |
@@ -251,6 +383,70 @@ out: | |||
251 | return ret; | 383 | return ret; |
252 | } | 384 | } |
253 | 385 | ||
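The last-seen column printed above splits a jiffies delta into whole seconds and a millisecond remainder via jiffies_to_msecs(). The same arithmetic in a self-contained form; HZ and the tick count are assumed values here, and the conversion is only a faithful model of jiffies_to_msecs() when HZ divides 1000 evenly:

#include <stdio.h>

#define HZ 250	/* assumed tick rate, for the example only */

int main(void)
{
	unsigned long delta_jiffies = 3141;		/* made-up delta */
	unsigned long msecs = delta_jiffies * 1000 / HZ;
	unsigned long last_seen_secs = msecs / 1000;
	unsigned long last_seen_msecs = msecs % 1000;

	/* same "%3i.%03is" layout as the debugfs output */
	printf("%3lu.%03lus\n", last_seen_secs, last_seen_msecs);
	return 0;
}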
386 | void softif_neigh_purge(struct bat_priv *bat_priv) | ||
387 | { | ||
388 | struct softif_neigh *softif_neigh, *curr_softif_neigh; | ||
389 | struct softif_neigh_vid *softif_neigh_vid; | ||
390 | struct hlist_node *node, *node_tmp, *node_tmp2; | ||
391 | char do_deselect; | ||
392 | |||
393 | rcu_read_lock(); | ||
394 | hlist_for_each_entry_rcu(softif_neigh_vid, node, | ||
395 | &bat_priv->softif_neigh_vids, list) { | ||
396 | if (!atomic_inc_not_zero(&softif_neigh_vid->refcount)) | ||
397 | continue; | ||
398 | |||
399 | curr_softif_neigh = softif_neigh_get_selected(softif_neigh_vid); | ||
400 | do_deselect = 0; | ||
401 | |||
402 | spin_lock_bh(&bat_priv->softif_neigh_lock); | ||
403 | hlist_for_each_entry_safe(softif_neigh, node_tmp, node_tmp2, | ||
404 | &softif_neigh_vid->softif_neigh_list, | ||
405 | list) { | ||
406 | if ((!time_after(jiffies, softif_neigh->last_seen + | ||
407 | msecs_to_jiffies(SOFTIF_NEIGH_TIMEOUT))) && | ||
408 | (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE)) | ||
409 | continue; | ||
410 | |||
411 | if (curr_softif_neigh == softif_neigh) { | ||
412 | bat_dbg(DBG_ROUTES, bat_priv, | ||
413 | "Current mesh exit point on vid: %d " | ||
414 | "'%pM' vanished.\n", | ||
415 | softif_neigh_vid->vid, | ||
416 | softif_neigh->addr); | ||
417 | do_deselect = 1; | ||
418 | } | ||
419 | |||
420 | hlist_del_rcu(&softif_neigh->list); | ||
421 | softif_neigh_free_ref(softif_neigh); | ||
422 | } | ||
423 | spin_unlock_bh(&bat_priv->softif_neigh_lock); | ||
424 | |||
425 | /* softif_neigh_vid_deselect() needs to acquire the | ||
426 | * softif_neigh_lock */ | ||
427 | if (do_deselect) | ||
428 | softif_neigh_vid_deselect(bat_priv, softif_neigh_vid); | ||
429 | |||
430 | if (curr_softif_neigh) | ||
431 | softif_neigh_free_ref(curr_softif_neigh); | ||
432 | |||
433 | softif_neigh_vid_free_ref(softif_neigh_vid); | ||
434 | } | ||
435 | rcu_read_unlock(); | ||
436 | |||
437 | spin_lock_bh(&bat_priv->softif_neigh_vid_lock); | ||
438 | hlist_for_each_entry_safe(softif_neigh_vid, node, node_tmp, | ||
439 | &bat_priv->softif_neigh_vids, list) { | ||
440 | if (!hlist_empty(&softif_neigh_vid->softif_neigh_list)) | ||
441 | continue; | ||
442 | |||
443 | hlist_del_rcu(&softif_neigh_vid->list); | ||
444 | softif_neigh_vid_free_ref(softif_neigh_vid); | ||
445 | } | ||
446 | spin_unlock_bh(&bat_priv->softif_neigh_vid_lock); | ||
447 | |||
448 | } | ||
449 | |||
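The purge loop above drops a neighbour when it has not been seen within SOFTIF_NEIGH_TIMEOUT milliseconds or when the mesh interface is no longer active; only the currently selected exit point additionally triggers a deferred re-election. A sketch of that predicate as a standalone helper, with the timeout value assumed rather than taken from the headers:

#include <stdbool.h>
#include <stdio.h>

#define SOFTIF_NEIGH_TIMEOUT_MS 400	/* assumed value, for illustration */

/* Drop the neighbour if it timed out or if the mesh went inactive. */
static bool neigh_should_purge(unsigned long now_ms, unsigned long last_seen_ms,
			       bool mesh_active)
{
	bool timed_out = (now_ms - last_seen_ms) > SOFTIF_NEIGH_TIMEOUT_MS;

	return timed_out || !mesh_active;
}

int main(void)
{
	printf("%d %d %d\n",
	       neigh_should_purge(1000, 900, true),	/* fresh: keep      */
	       neigh_should_purge(1000, 100, true),	/* stale: purge     */
	       neigh_should_purge(1000, 900, false));	/* mesh down: purge */
	return 0;
}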
254 | static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev, | 450 | static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev, |
255 | short vid) | 451 | short vid) |
256 | { | 452 | { |
@@ -283,10 +479,7 @@ static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev, | |||
283 | if (!softif_neigh) | 479 | if (!softif_neigh) |
284 | goto out; | 480 | goto out; |
285 | 481 | ||
286 | curr_softif_neigh = softif_neigh_get_selected(bat_priv); | 482 | curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid); |
287 | if (!curr_softif_neigh) | ||
288 | goto out; | ||
289 | |||
290 | if (curr_softif_neigh == softif_neigh) | 483 | if (curr_softif_neigh == softif_neigh) |
291 | goto out; | 484 | goto out; |
292 | 485 | ||
@@ -299,33 +492,16 @@ static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev, | |||
299 | softif_neigh->addr, ETH_ALEN) < 0) | 492 | softif_neigh->addr, ETH_ALEN) < 0) |
300 | goto out; | 493 | goto out; |
301 | 494 | ||
302 | /* switch to new 'smallest neighbor' */ | ||
303 | if ((curr_softif_neigh) && | ||
304 | (memcmp(softif_neigh->addr, curr_softif_neigh->addr, | ||
305 | ETH_ALEN) < 0)) { | ||
306 | bat_dbg(DBG_ROUTES, bat_priv, | ||
307 | "Changing mesh exit point from %pM (vid: %d) " | ||
308 | "to %pM (vid: %d).\n", | ||
309 | curr_softif_neigh->addr, | ||
310 | curr_softif_neigh->vid, | ||
311 | softif_neigh->addr, softif_neigh->vid); | ||
312 | |||
313 | softif_neigh_select(bat_priv, softif_neigh); | ||
314 | goto out; | ||
315 | } | ||
316 | |||
317 | /* close own batX device and use softif_neigh as exit node */ | 495 | /* close own batX device and use softif_neigh as exit node */ |
318 | if ((!curr_softif_neigh) && | 496 | if (!curr_softif_neigh) { |
319 | (memcmp(softif_neigh->addr, | 497 | softif_neigh_vid_select(bat_priv, softif_neigh, vid); |
320 | primary_if->net_dev->dev_addr, ETH_ALEN) < 0)) { | ||
321 | bat_dbg(DBG_ROUTES, bat_priv, | ||
322 | "Setting mesh exit point to %pM (vid: %d).\n", | ||
323 | softif_neigh->addr, softif_neigh->vid); | ||
324 | |||
325 | softif_neigh_select(bat_priv, softif_neigh); | ||
326 | goto out; | 498 | goto out; |
327 | } | 499 | } |
328 | 500 | ||
501 | /* switch to new 'smallest neighbor' */ | ||
502 | if (memcmp(softif_neigh->addr, curr_softif_neigh->addr, ETH_ALEN) < 0) | ||
503 | softif_neigh_vid_select(bat_priv, softif_neigh, vid); | ||
504 | |||
329 | out: | 505 | out: |
330 | kfree_skb(skb); | 506 | kfree_skb(skb); |
331 | if (softif_neigh) | 507 | if (softif_neigh) |
@@ -363,11 +539,11 @@ static int interface_set_mac_addr(struct net_device *dev, void *p) | |||
363 | if (!is_valid_ether_addr(addr->sa_data)) | 539 | if (!is_valid_ether_addr(addr->sa_data)) |
364 | return -EADDRNOTAVAIL; | 540 | return -EADDRNOTAVAIL; |
365 | 541 | ||
366 | /* only modify hna-table if it has been initialised before */ | 542 | /* only modify transtable if it has been initialised before */ |
367 | if (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE) { | 543 | if (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE) { |
368 | hna_local_remove(bat_priv, dev->dev_addr, | 544 | tt_local_remove(bat_priv, dev->dev_addr, |
369 | "mac address changed"); | 545 | "mac address changed"); |
370 | hna_local_add(dev, addr->sa_data); | 546 | tt_local_add(dev, addr->sa_data); |
371 | } | 547 | } |
372 | 548 | ||
373 | memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); | 549 | memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); |
@@ -420,12 +596,12 @@ int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) | |||
420 | * if we have another chosen mesh exit node in range | 596 |
421 | * it will transport the packets to the mesh | 597 |
422 | */ | 598 | */ |
423 | curr_softif_neigh = softif_neigh_get_selected(bat_priv); | 599 | curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid); |
424 | if ((curr_softif_neigh) && (curr_softif_neigh->vid == vid)) | 600 | if (curr_softif_neigh) |
425 | goto dropped; | 601 | goto dropped; |
426 | 602 | ||
427 | /* TODO: check this for locks */ | 603 | /* TODO: check this for locks */ |
428 | hna_local_add(soft_iface, ethhdr->h_source); | 604 | tt_local_add(soft_iface, ethhdr->h_source); |
429 | 605 | ||
430 | if (is_multicast_ether_addr(ethhdr->h_dest)) { | 606 | if (is_multicast_ether_addr(ethhdr->h_dest)) { |
431 | ret = gw_is_target(bat_priv, skb); | 607 | ret = gw_is_target(bat_priv, skb); |
@@ -529,8 +705,8 @@ void interface_rx(struct net_device *soft_iface, | |||
529 | * if we have another chosen mesh exit node in range | 705 |
530 | * it will transport the packets to the non-mesh network | 706 | * it will transport the packets to the non-mesh network |
531 | */ | 707 | */ |
532 | curr_softif_neigh = softif_neigh_get_selected(bat_priv); | 708 | curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid); |
533 | if (curr_softif_neigh && (curr_softif_neigh->vid == vid)) { | 709 | if (curr_softif_neigh) { |
534 | skb_push(skb, hdr_size); | 710 | skb_push(skb, hdr_size); |
535 | unicast_packet = (struct unicast_packet *)skb->data; | 711 | unicast_packet = (struct unicast_packet *)skb->data; |
536 | 712 | ||
@@ -613,8 +789,8 @@ static void interface_setup(struct net_device *dev) | |||
613 | * have not been initialized yet | 789 | * have not been initialized yet |
614 | */ | 790 | */ |
615 | dev->mtu = ETH_DATA_LEN; | 791 | dev->mtu = ETH_DATA_LEN; |
616 | dev->hard_header_len = BAT_HEADER_LEN; /* reserve more space in the | 792 | /* reserve more space in the skbuff for our header */ |
617 | * skbuff for our header */ | 793 | dev->hard_header_len = BAT_HEADER_LEN; |
618 | 794 | ||
619 | /* generate random address */ | 795 | /* generate random address */ |
620 | random_ether_addr(dev_addr); | 796 | random_ether_addr(dev_addr); |
@@ -639,7 +815,7 @@ struct net_device *softif_create(char *name) | |||
639 | goto out; | 815 | goto out; |
640 | } | 816 | } |
641 | 817 | ||
642 | ret = register_netdev(soft_iface); | 818 | ret = register_netdevice(soft_iface); |
643 | if (ret < 0) { | 819 | if (ret < 0) { |
644 | pr_err("Unable to register the batman interface '%s': %i\n", | 820 | pr_err("Unable to register the batman interface '%s': %i\n", |
645 | name, ret); | 821 | name, ret); |
@@ -663,11 +839,10 @@ struct net_device *softif_create(char *name) | |||
663 | 839 | ||
664 | atomic_set(&bat_priv->mesh_state, MESH_INACTIVE); | 840 | atomic_set(&bat_priv->mesh_state, MESH_INACTIVE); |
665 | atomic_set(&bat_priv->bcast_seqno, 1); | 841 | atomic_set(&bat_priv->bcast_seqno, 1); |
666 | atomic_set(&bat_priv->hna_local_changed, 0); | 842 | atomic_set(&bat_priv->tt_local_changed, 0); |
667 | 843 | ||
668 | bat_priv->primary_if = NULL; | 844 | bat_priv->primary_if = NULL; |
669 | bat_priv->num_ifaces = 0; | 845 | bat_priv->num_ifaces = 0; |
670 | bat_priv->softif_neigh = NULL; | ||
671 | 846 | ||
672 | ret = sysfs_add_meshif(soft_iface); | 847 | ret = sysfs_add_meshif(soft_iface); |
673 | if (ret < 0) | 848 | if (ret < 0) |
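One change in the hunk above that is easy to miss: softif_create() now calls register_netdevice() instead of register_netdev(). The two differ only in locking, since register_netdev() takes the RTNL lock itself while register_netdevice() expects the caller to already hold it, so the switch presumably reflects that softif_create() is now invoked with rtnl_lock() held. Illustrative fragment of that contract, not taken from this patch:

/* register_netdev() is shorthand for the rtnl-locked sequence below;
 * a caller that already holds the RTNL lock must use register_netdevice(). */
rtnl_lock();
ret = register_netdevice(soft_iface);
rtnl_unlock();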
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index f931830d630e..7b729660cbfd 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c | |||
@@ -26,40 +26,40 @@ | |||
26 | #include "hash.h" | 26 | #include "hash.h" |
27 | #include "originator.h" | 27 | #include "originator.h" |
28 | 28 | ||
29 | static void hna_local_purge(struct work_struct *work); | 29 | static void tt_local_purge(struct work_struct *work); |
30 | static void _hna_global_del_orig(struct bat_priv *bat_priv, | 30 | static void _tt_global_del_orig(struct bat_priv *bat_priv, |
31 | struct hna_global_entry *hna_global_entry, | 31 | struct tt_global_entry *tt_global_entry, |
32 | char *message); | 32 | char *message); |
33 | 33 | ||
34 | /* returns 1 if they are the same mac addr */ | 34 | /* returns 1 if they are the same mac addr */ |
35 | static int compare_lhna(struct hlist_node *node, void *data2) | 35 | static int compare_ltt(struct hlist_node *node, void *data2) |
36 | { | 36 | { |
37 | void *data1 = container_of(node, struct hna_local_entry, hash_entry); | 37 | void *data1 = container_of(node, struct tt_local_entry, hash_entry); |
38 | 38 | ||
39 | return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); | 39 | return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); |
40 | } | 40 | } |
41 | 41 | ||
42 | /* returns 1 if they are the same mac addr */ | 42 | /* returns 1 if they are the same mac addr */ |
43 | static int compare_ghna(struct hlist_node *node, void *data2) | 43 | static int compare_gtt(struct hlist_node *node, void *data2) |
44 | { | 44 | { |
45 | void *data1 = container_of(node, struct hna_global_entry, hash_entry); | 45 | void *data1 = container_of(node, struct tt_global_entry, hash_entry); |
46 | 46 | ||
47 | return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); | 47 | return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); |
48 | } | 48 | } |
49 | 49 | ||
50 | static void hna_local_start_timer(struct bat_priv *bat_priv) | 50 | static void tt_local_start_timer(struct bat_priv *bat_priv) |
51 | { | 51 | { |
52 | INIT_DELAYED_WORK(&bat_priv->hna_work, hna_local_purge); | 52 | INIT_DELAYED_WORK(&bat_priv->tt_work, tt_local_purge); |
53 | queue_delayed_work(bat_event_workqueue, &bat_priv->hna_work, 10 * HZ); | 53 | queue_delayed_work(bat_event_workqueue, &bat_priv->tt_work, 10 * HZ); |
54 | } | 54 | } |
55 | 55 | ||
56 | static struct hna_local_entry *hna_local_hash_find(struct bat_priv *bat_priv, | 56 | static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv, |
57 | void *data) | 57 | void *data) |
58 | { | 58 | { |
59 | struct hashtable_t *hash = bat_priv->hna_local_hash; | 59 | struct hashtable_t *hash = bat_priv->tt_local_hash; |
60 | struct hlist_head *head; | 60 | struct hlist_head *head; |
61 | struct hlist_node *node; | 61 | struct hlist_node *node; |
62 | struct hna_local_entry *hna_local_entry, *hna_local_entry_tmp = NULL; | 62 | struct tt_local_entry *tt_local_entry, *tt_local_entry_tmp = NULL; |
63 | int index; | 63 | int index; |
64 | 64 | ||
65 | if (!hash) | 65 | if (!hash) |
@@ -69,26 +69,26 @@ static struct hna_local_entry *hna_local_hash_find(struct bat_priv *bat_priv, | |||
69 | head = &hash->table[index]; | 69 | head = &hash->table[index]; |
70 | 70 | ||
71 | rcu_read_lock(); | 71 | rcu_read_lock(); |
72 | hlist_for_each_entry_rcu(hna_local_entry, node, head, hash_entry) { | 72 | hlist_for_each_entry_rcu(tt_local_entry, node, head, hash_entry) { |
73 | if (!compare_eth(hna_local_entry, data)) | 73 | if (!compare_eth(tt_local_entry, data)) |
74 | continue; | 74 | continue; |
75 | 75 | ||
76 | hna_local_entry_tmp = hna_local_entry; | 76 | tt_local_entry_tmp = tt_local_entry; |
77 | break; | 77 | break; |
78 | } | 78 | } |
79 | rcu_read_unlock(); | 79 | rcu_read_unlock(); |
80 | 80 | ||
81 | return hna_local_entry_tmp; | 81 | return tt_local_entry_tmp; |
82 | } | 82 | } |
83 | 83 | ||
84 | static struct hna_global_entry *hna_global_hash_find(struct bat_priv *bat_priv, | 84 | static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv, |
85 | void *data) | 85 | void *data) |
86 | { | 86 | { |
87 | struct hashtable_t *hash = bat_priv->hna_global_hash; | 87 | struct hashtable_t *hash = bat_priv->tt_global_hash; |
88 | struct hlist_head *head; | 88 | struct hlist_head *head; |
89 | struct hlist_node *node; | 89 | struct hlist_node *node; |
90 | struct hna_global_entry *hna_global_entry; | 90 | struct tt_global_entry *tt_global_entry; |
91 | struct hna_global_entry *hna_global_entry_tmp = NULL; | 91 | struct tt_global_entry *tt_global_entry_tmp = NULL; |
92 | int index; | 92 | int index; |
93 | 93 | ||
94 | if (!hash) | 94 | if (!hash) |
@@ -98,125 +98,125 @@ static struct hna_global_entry *hna_global_hash_find(struct bat_priv *bat_priv, | |||
98 | head = &hash->table[index]; | 98 | head = &hash->table[index]; |
99 | 99 | ||
100 | rcu_read_lock(); | 100 | rcu_read_lock(); |
101 | hlist_for_each_entry_rcu(hna_global_entry, node, head, hash_entry) { | 101 | hlist_for_each_entry_rcu(tt_global_entry, node, head, hash_entry) { |
102 | if (!compare_eth(hna_global_entry, data)) | 102 | if (!compare_eth(tt_global_entry, data)) |
103 | continue; | 103 | continue; |
104 | 104 | ||
105 | hna_global_entry_tmp = hna_global_entry; | 105 | tt_global_entry_tmp = tt_global_entry; |
106 | break; | 106 | break; |
107 | } | 107 | } |
108 | rcu_read_unlock(); | 108 | rcu_read_unlock(); |
109 | 109 | ||
110 | return hna_global_entry_tmp; | 110 | return tt_global_entry_tmp; |
111 | } | 111 | } |
112 | 112 | ||
113 | int hna_local_init(struct bat_priv *bat_priv) | 113 | int tt_local_init(struct bat_priv *bat_priv) |
114 | { | 114 | { |
115 | if (bat_priv->hna_local_hash) | 115 | if (bat_priv->tt_local_hash) |
116 | return 1; | 116 | return 1; |
117 | 117 | ||
118 | bat_priv->hna_local_hash = hash_new(1024); | 118 | bat_priv->tt_local_hash = hash_new(1024); |
119 | 119 | ||
120 | if (!bat_priv->hna_local_hash) | 120 | if (!bat_priv->tt_local_hash) |
121 | return 0; | 121 | return 0; |
122 | 122 | ||
123 | atomic_set(&bat_priv->hna_local_changed, 0); | 123 | atomic_set(&bat_priv->tt_local_changed, 0); |
124 | hna_local_start_timer(bat_priv); | 124 | tt_local_start_timer(bat_priv); |
125 | 125 | ||
126 | return 1; | 126 | return 1; |
127 | } | 127 | } |
128 | 128 | ||
129 | void hna_local_add(struct net_device *soft_iface, uint8_t *addr) | 129 | void tt_local_add(struct net_device *soft_iface, uint8_t *addr) |
130 | { | 130 | { |
131 | struct bat_priv *bat_priv = netdev_priv(soft_iface); | 131 | struct bat_priv *bat_priv = netdev_priv(soft_iface); |
132 | struct hna_local_entry *hna_local_entry; | 132 | struct tt_local_entry *tt_local_entry; |
133 | struct hna_global_entry *hna_global_entry; | 133 | struct tt_global_entry *tt_global_entry; |
134 | int required_bytes; | 134 | int required_bytes; |
135 | 135 | ||
136 | spin_lock_bh(&bat_priv->hna_lhash_lock); | 136 | spin_lock_bh(&bat_priv->tt_lhash_lock); |
137 | hna_local_entry = hna_local_hash_find(bat_priv, addr); | 137 | tt_local_entry = tt_local_hash_find(bat_priv, addr); |
138 | spin_unlock_bh(&bat_priv->hna_lhash_lock); | 138 | spin_unlock_bh(&bat_priv->tt_lhash_lock); |
139 | 139 | ||
140 | if (hna_local_entry) { | 140 | if (tt_local_entry) { |
141 | hna_local_entry->last_seen = jiffies; | 141 | tt_local_entry->last_seen = jiffies; |
142 | return; | 142 | return; |
143 | } | 143 | } |
144 | 144 | ||
145 | /* only announce as many hosts as possible in the batman-packet and | 145 | /* only announce as many hosts as possible in the batman-packet and |
146 | space in batman_packet->num_hna. That also should give a limit to | 146 | space in batman_packet->num_tt. That also should give a limit to
147 | MAC-flooding. */ | 147 | MAC-flooding. */ |
148 | required_bytes = (bat_priv->num_local_hna + 1) * ETH_ALEN; | 148 | required_bytes = (bat_priv->num_local_tt + 1) * ETH_ALEN; |
149 | required_bytes += BAT_PACKET_LEN; | 149 | required_bytes += BAT_PACKET_LEN; |
150 | 150 | ||
151 | if ((required_bytes > ETH_DATA_LEN) || | 151 | if ((required_bytes > ETH_DATA_LEN) || |
152 | (atomic_read(&bat_priv->aggregated_ogms) && | 152 | (atomic_read(&bat_priv->aggregated_ogms) && |
153 | required_bytes > MAX_AGGREGATION_BYTES) || | 153 | required_bytes > MAX_AGGREGATION_BYTES) || |
154 | (bat_priv->num_local_hna + 1 > 255)) { | 154 | (bat_priv->num_local_tt + 1 > 255)) { |
155 | bat_dbg(DBG_ROUTES, bat_priv, | 155 | bat_dbg(DBG_ROUTES, bat_priv, |
156 | "Can't add new local hna entry (%pM): " | 156 | "Can't add new local tt entry (%pM): " |
157 | "number of local hna entries exceeds packet size\n", | 157 | "number of local tt entries exceeds packet size\n", |
158 | addr); | 158 | addr); |
159 | return; | 159 | return; |
160 | } | 160 | } |
161 | 161 | ||
162 | bat_dbg(DBG_ROUTES, bat_priv, | 162 | bat_dbg(DBG_ROUTES, bat_priv, |
163 | "Creating new local hna entry: %pM\n", addr); | 163 | "Creating new local tt entry: %pM\n", addr); |
164 | 164 | ||
165 | hna_local_entry = kmalloc(sizeof(struct hna_local_entry), GFP_ATOMIC); | 165 | tt_local_entry = kmalloc(sizeof(struct tt_local_entry), GFP_ATOMIC); |
166 | if (!hna_local_entry) | 166 | if (!tt_local_entry) |
167 | return; | 167 | return; |
168 | 168 | ||
169 | memcpy(hna_local_entry->addr, addr, ETH_ALEN); | 169 | memcpy(tt_local_entry->addr, addr, ETH_ALEN); |
170 | hna_local_entry->last_seen = jiffies; | 170 | tt_local_entry->last_seen = jiffies; |
171 | 171 | ||
172 | /* the batman interface mac address should never be purged */ | 172 | /* the batman interface mac address should never be purged */ |
173 | if (compare_eth(addr, soft_iface->dev_addr)) | 173 | if (compare_eth(addr, soft_iface->dev_addr)) |
174 | hna_local_entry->never_purge = 1; | 174 | tt_local_entry->never_purge = 1; |
175 | else | 175 | else |
176 | hna_local_entry->never_purge = 0; | 176 | tt_local_entry->never_purge = 0; |
177 | 177 | ||
178 | spin_lock_bh(&bat_priv->hna_lhash_lock); | 178 | spin_lock_bh(&bat_priv->tt_lhash_lock); |
179 | 179 | ||
180 | hash_add(bat_priv->hna_local_hash, compare_lhna, choose_orig, | 180 | hash_add(bat_priv->tt_local_hash, compare_ltt, choose_orig, |
181 | hna_local_entry, &hna_local_entry->hash_entry); | 181 | tt_local_entry, &tt_local_entry->hash_entry); |
182 | bat_priv->num_local_hna++; | 182 | bat_priv->num_local_tt++; |
183 | atomic_set(&bat_priv->hna_local_changed, 1); | 183 | atomic_set(&bat_priv->tt_local_changed, 1); |
184 | 184 | ||
185 | spin_unlock_bh(&bat_priv->hna_lhash_lock); | 185 | spin_unlock_bh(&bat_priv->tt_lhash_lock); |
186 | 186 | ||
187 | /* remove address from global hash if present */ | 187 | /* remove address from global hash if present */ |
188 | spin_lock_bh(&bat_priv->hna_ghash_lock); | 188 | spin_lock_bh(&bat_priv->tt_ghash_lock); |
189 | 189 | ||
190 | hna_global_entry = hna_global_hash_find(bat_priv, addr); | 190 | tt_global_entry = tt_global_hash_find(bat_priv, addr); |
191 | 191 | ||
192 | if (hna_global_entry) | 192 | if (tt_global_entry) |
193 | _hna_global_del_orig(bat_priv, hna_global_entry, | 193 | _tt_global_del_orig(bat_priv, tt_global_entry, |
194 | "local hna received"); | 194 | "local tt received"); |
195 | 195 | ||
196 | spin_unlock_bh(&bat_priv->hna_ghash_lock); | 196 | spin_unlock_bh(&bat_priv->tt_ghash_lock); |
197 | } | 197 | } |
198 | 198 | ||
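tt_local_add() refuses new entries once announcing them would no longer fit into a single OGM: (num_local_tt + 1) * ETH_ALEN plus the OGM header must stay within ETH_DATA_LEN (or within MAX_AGGREGATION_BYTES when aggregation is enabled), and num_tt is capped at 255. A standalone version of that arithmetic for the non-aggregated case, with BAT_PACKET_LEN assumed rather than taken from packet.h:

#include <stdbool.h>
#include <stdio.h>

#define ETH_ALEN	6
#define ETH_DATA_LEN	1500
#define BAT_PACKET_LEN	26	/* assumed OGM header size, for illustration */

/* Would announcing one more local client still fit into one OGM?
 * (Simplified: ignores the tighter MAX_AGGREGATION_BYTES limit.) */
static bool tt_entry_fits(int num_local_tt)
{
	int required_bytes = (num_local_tt + 1) * ETH_ALEN + BAT_PACKET_LEN;

	return required_bytes <= ETH_DATA_LEN && num_local_tt + 1 <= 255;
}

int main(void)
{
	printf("10 clients: %d, 250 clients: %d\n",
	       tt_entry_fits(10), tt_entry_fits(250));
	return 0;
}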
199 | int hna_local_fill_buffer(struct bat_priv *bat_priv, | 199 | int tt_local_fill_buffer(struct bat_priv *bat_priv, |
200 | unsigned char *buff, int buff_len) | 200 | unsigned char *buff, int buff_len) |
201 | { | 201 | { |
202 | struct hashtable_t *hash = bat_priv->hna_local_hash; | 202 | struct hashtable_t *hash = bat_priv->tt_local_hash; |
203 | struct hna_local_entry *hna_local_entry; | 203 | struct tt_local_entry *tt_local_entry; |
204 | struct hlist_node *node; | 204 | struct hlist_node *node; |
205 | struct hlist_head *head; | 205 | struct hlist_head *head; |
206 | int i, count = 0; | 206 | int i, count = 0; |
207 | 207 | ||
208 | spin_lock_bh(&bat_priv->hna_lhash_lock); | 208 | spin_lock_bh(&bat_priv->tt_lhash_lock); |
209 | 209 | ||
210 | for (i = 0; i < hash->size; i++) { | 210 | for (i = 0; i < hash->size; i++) { |
211 | head = &hash->table[i]; | 211 | head = &hash->table[i]; |
212 | 212 | ||
213 | rcu_read_lock(); | 213 | rcu_read_lock(); |
214 | hlist_for_each_entry_rcu(hna_local_entry, node, | 214 | hlist_for_each_entry_rcu(tt_local_entry, node, |
215 | head, hash_entry) { | 215 | head, hash_entry) { |
216 | if (buff_len < (count + 1) * ETH_ALEN) | 216 | if (buff_len < (count + 1) * ETH_ALEN) |
217 | break; | 217 | break; |
218 | 218 | ||
219 | memcpy(buff + (count * ETH_ALEN), hna_local_entry->addr, | 219 | memcpy(buff + (count * ETH_ALEN), tt_local_entry->addr, |
220 | ETH_ALEN); | 220 | ETH_ALEN); |
221 | 221 | ||
222 | count++; | 222 | count++; |
@@ -224,20 +224,20 @@ int hna_local_fill_buffer(struct bat_priv *bat_priv, | |||
224 | rcu_read_unlock(); | 224 | rcu_read_unlock(); |
225 | } | 225 | } |
226 | 226 | ||
227 | /* if we did not get all new local hnas see you next time ;-) */ | 227 | /* if we did not get all new local tts see you next time ;-) */ |
228 | if (count == bat_priv->num_local_hna) | 228 | if (count == bat_priv->num_local_tt) |
229 | atomic_set(&bat_priv->hna_local_changed, 0); | 229 | atomic_set(&bat_priv->tt_local_changed, 0); |
230 | 230 | ||
231 | spin_unlock_bh(&bat_priv->hna_lhash_lock); | 231 | spin_unlock_bh(&bat_priv->tt_lhash_lock); |
232 | return count; | 232 | return count; |
233 | } | 233 | } |
234 | 234 | ||
235 | int hna_local_seq_print_text(struct seq_file *seq, void *offset) | 235 | int tt_local_seq_print_text(struct seq_file *seq, void *offset) |
236 | { | 236 | { |
237 | struct net_device *net_dev = (struct net_device *)seq->private; | 237 | struct net_device *net_dev = (struct net_device *)seq->private; |
238 | struct bat_priv *bat_priv = netdev_priv(net_dev); | 238 | struct bat_priv *bat_priv = netdev_priv(net_dev); |
239 | struct hashtable_t *hash = bat_priv->hna_local_hash; | 239 | struct hashtable_t *hash = bat_priv->tt_local_hash; |
240 | struct hna_local_entry *hna_local_entry; | 240 | struct tt_local_entry *tt_local_entry; |
241 | struct hard_iface *primary_if; | 241 | struct hard_iface *primary_if; |
242 | struct hlist_node *node; | 242 | struct hlist_node *node; |
243 | struct hlist_head *head; | 243 | struct hlist_head *head; |
@@ -261,10 +261,10 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset) | |||
261 | } | 261 | } |
262 | 262 | ||
263 | seq_printf(seq, "Locally retrieved addresses (from %s) " | 263 | seq_printf(seq, "Locally retrieved addresses (from %s) " |
264 | "announced via HNA:\n", | 264 | "announced via TT:\n", |
265 | net_dev->name); | 265 | net_dev->name); |
266 | 266 | ||
267 | spin_lock_bh(&bat_priv->hna_lhash_lock); | 267 | spin_lock_bh(&bat_priv->tt_lhash_lock); |
268 | 268 | ||
269 | buf_size = 1; | 269 | buf_size = 1; |
270 | /* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */ | 270 | /* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */ |
@@ -279,7 +279,7 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset) | |||
279 | 279 | ||
280 | buff = kmalloc(buf_size, GFP_ATOMIC); | 280 | buff = kmalloc(buf_size, GFP_ATOMIC); |
281 | if (!buff) { | 281 | if (!buff) { |
282 | spin_unlock_bh(&bat_priv->hna_lhash_lock); | 282 | spin_unlock_bh(&bat_priv->tt_lhash_lock); |
283 | ret = -ENOMEM; | 283 | ret = -ENOMEM; |
284 | goto out; | 284 | goto out; |
285 | } | 285 | } |
@@ -291,15 +291,15 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset) | |||
291 | head = &hash->table[i]; | 291 | head = &hash->table[i]; |
292 | 292 | ||
293 | rcu_read_lock(); | 293 | rcu_read_lock(); |
294 | hlist_for_each_entry_rcu(hna_local_entry, node, | 294 | hlist_for_each_entry_rcu(tt_local_entry, node, |
295 | head, hash_entry) { | 295 | head, hash_entry) { |
296 | pos += snprintf(buff + pos, 22, " * %pM\n", | 296 | pos += snprintf(buff + pos, 22, " * %pM\n", |
297 | hna_local_entry->addr); | 297 | tt_local_entry->addr); |
298 | } | 298 | } |
299 | rcu_read_unlock(); | 299 | rcu_read_unlock(); |
300 | } | 300 | } |
301 | 301 | ||
302 | spin_unlock_bh(&bat_priv->hna_lhash_lock); | 302 | spin_unlock_bh(&bat_priv->tt_lhash_lock); |
303 | 303 | ||
304 | seq_printf(seq, "%s", buff); | 304 | seq_printf(seq, "%s", buff); |
305 | kfree(buff); | 305 | kfree(buff); |
@@ -309,180 +309,180 @@ out: | |||
309 | return ret; | 309 | return ret; |
310 | } | 310 | } |
311 | 311 | ||
312 | static void _hna_local_del(struct hlist_node *node, void *arg) | 312 | static void _tt_local_del(struct hlist_node *node, void *arg) |
313 | { | 313 | { |
314 | struct bat_priv *bat_priv = (struct bat_priv *)arg; | 314 | struct bat_priv *bat_priv = (struct bat_priv *)arg; |
315 | void *data = container_of(node, struct hna_local_entry, hash_entry); | 315 | void *data = container_of(node, struct tt_local_entry, hash_entry); |
316 | 316 | ||
317 | kfree(data); | 317 | kfree(data); |
318 | bat_priv->num_local_hna--; | 318 | bat_priv->num_local_tt--; |
319 | atomic_set(&bat_priv->hna_local_changed, 1); | 319 | atomic_set(&bat_priv->tt_local_changed, 1); |
320 | } | 320 | } |
321 | 321 | ||
322 | static void hna_local_del(struct bat_priv *bat_priv, | 322 | static void tt_local_del(struct bat_priv *bat_priv, |
323 | struct hna_local_entry *hna_local_entry, | 323 | struct tt_local_entry *tt_local_entry, |
324 | char *message) | 324 | char *message) |
325 | { | 325 | { |
326 | bat_dbg(DBG_ROUTES, bat_priv, "Deleting local hna entry (%pM): %s\n", | 326 | bat_dbg(DBG_ROUTES, bat_priv, "Deleting local tt entry (%pM): %s\n", |
327 | hna_local_entry->addr, message); | 327 | tt_local_entry->addr, message); |
328 | 328 | ||
329 | hash_remove(bat_priv->hna_local_hash, compare_lhna, choose_orig, | 329 | hash_remove(bat_priv->tt_local_hash, compare_ltt, choose_orig, |
330 | hna_local_entry->addr); | 330 | tt_local_entry->addr); |
331 | _hna_local_del(&hna_local_entry->hash_entry, bat_priv); | 331 | _tt_local_del(&tt_local_entry->hash_entry, bat_priv); |
332 | } | 332 | } |
333 | 333 | ||
334 | void hna_local_remove(struct bat_priv *bat_priv, | 334 | void tt_local_remove(struct bat_priv *bat_priv, |
335 | uint8_t *addr, char *message) | 335 | uint8_t *addr, char *message) |
336 | { | 336 | { |
337 | struct hna_local_entry *hna_local_entry; | 337 | struct tt_local_entry *tt_local_entry; |
338 | 338 | ||
339 | spin_lock_bh(&bat_priv->hna_lhash_lock); | 339 | spin_lock_bh(&bat_priv->tt_lhash_lock); |
340 | 340 | ||
341 | hna_local_entry = hna_local_hash_find(bat_priv, addr); | 341 | tt_local_entry = tt_local_hash_find(bat_priv, addr); |
342 | 342 | ||
343 | if (hna_local_entry) | 343 | if (tt_local_entry) |
344 | hna_local_del(bat_priv, hna_local_entry, message); | 344 | tt_local_del(bat_priv, tt_local_entry, message); |
345 | 345 | ||
346 | spin_unlock_bh(&bat_priv->hna_lhash_lock); | 346 | spin_unlock_bh(&bat_priv->tt_lhash_lock); |
347 | } | 347 | } |
348 | 348 | ||
349 | static void hna_local_purge(struct work_struct *work) | 349 | static void tt_local_purge(struct work_struct *work) |
350 | { | 350 | { |
351 | struct delayed_work *delayed_work = | 351 | struct delayed_work *delayed_work = |
352 | container_of(work, struct delayed_work, work); | 352 | container_of(work, struct delayed_work, work); |
353 | struct bat_priv *bat_priv = | 353 | struct bat_priv *bat_priv = |
354 | container_of(delayed_work, struct bat_priv, hna_work); | 354 | container_of(delayed_work, struct bat_priv, tt_work); |
355 | struct hashtable_t *hash = bat_priv->hna_local_hash; | 355 | struct hashtable_t *hash = bat_priv->tt_local_hash; |
356 | struct hna_local_entry *hna_local_entry; | 356 | struct tt_local_entry *tt_local_entry; |
357 | struct hlist_node *node, *node_tmp; | 357 | struct hlist_node *node, *node_tmp; |
358 | struct hlist_head *head; | 358 | struct hlist_head *head; |
359 | unsigned long timeout; | 359 | unsigned long timeout; |
360 | int i; | 360 | int i; |
361 | 361 | ||
362 | spin_lock_bh(&bat_priv->hna_lhash_lock); | 362 | spin_lock_bh(&bat_priv->tt_lhash_lock); |
363 | 363 | ||
364 | for (i = 0; i < hash->size; i++) { | 364 | for (i = 0; i < hash->size; i++) { |
365 | head = &hash->table[i]; | 365 | head = &hash->table[i]; |
366 | 366 | ||
367 | hlist_for_each_entry_safe(hna_local_entry, node, node_tmp, | 367 | hlist_for_each_entry_safe(tt_local_entry, node, node_tmp, |
368 | head, hash_entry) { | 368 | head, hash_entry) { |
369 | if (hna_local_entry->never_purge) | 369 | if (tt_local_entry->never_purge) |
370 | continue; | 370 | continue; |
371 | 371 | ||
372 | timeout = hna_local_entry->last_seen; | 372 | timeout = tt_local_entry->last_seen; |
373 | timeout += LOCAL_HNA_TIMEOUT * HZ; | 373 | timeout += TT_LOCAL_TIMEOUT * HZ; |
374 | 374 | ||
375 | if (time_before(jiffies, timeout)) | 375 | if (time_before(jiffies, timeout)) |
376 | continue; | 376 | continue; |
377 | 377 | ||
378 | hna_local_del(bat_priv, hna_local_entry, | 378 | tt_local_del(bat_priv, tt_local_entry, |
379 | "address timed out"); | 379 | "address timed out"); |
380 | } | 380 | } |
381 | } | 381 | } |
382 | 382 | ||
383 | spin_unlock_bh(&bat_priv->hna_lhash_lock); | 383 | spin_unlock_bh(&bat_priv->tt_lhash_lock); |
384 | hna_local_start_timer(bat_priv); | 384 | tt_local_start_timer(bat_priv); |
385 | } | 385 | } |
386 | 386 | ||
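tt_local_purge() expires entries with time_before(jiffies, last_seen + TT_LOCAL_TIMEOUT * HZ), which stays correct even when the jiffies counter wraps around because the kernel macro compares via a signed difference. A userspace model of that comparison and of the resulting purge decision:

#include <stdbool.h>
#include <stdio.h>

/* Wrap-safe model of the kernel's time_before(a, b): true if a is
 * earlier than b, even across an unsigned-counter wrap-around. */
static bool time_before_model(unsigned long a, unsigned long b)
{
	return (long)(a - b) < 0;
}

int main(void)
{
	unsigned long last_seen = (unsigned long)-100;	/* shortly before wrap */
	unsigned long timeout = last_seen + 1000;	/* wraps past zero */
	unsigned long now1 = 500, now2 = 2000;

	/* purge when "now" is no longer before the timeout */
	printf("purge at %lu: %d\n", now1, !time_before_model(now1, timeout));
	printf("purge at %lu: %d\n", now2, !time_before_model(now2, timeout));
	return 0;
}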
387 | void hna_local_free(struct bat_priv *bat_priv) | 387 | void tt_local_free(struct bat_priv *bat_priv) |
388 | { | 388 | { |
389 | if (!bat_priv->hna_local_hash) | 389 | if (!bat_priv->tt_local_hash) |
390 | return; | 390 | return; |
391 | 391 | ||
392 | cancel_delayed_work_sync(&bat_priv->hna_work); | 392 | cancel_delayed_work_sync(&bat_priv->tt_work); |
393 | hash_delete(bat_priv->hna_local_hash, _hna_local_del, bat_priv); | 393 | hash_delete(bat_priv->tt_local_hash, _tt_local_del, bat_priv); |
394 | bat_priv->hna_local_hash = NULL; | 394 | bat_priv->tt_local_hash = NULL; |
395 | } | 395 | } |
396 | 396 | ||
397 | int hna_global_init(struct bat_priv *bat_priv) | 397 | int tt_global_init(struct bat_priv *bat_priv) |
398 | { | 398 | { |
399 | if (bat_priv->hna_global_hash) | 399 | if (bat_priv->tt_global_hash) |
400 | return 1; | 400 | return 1; |
401 | 401 | ||
402 | bat_priv->hna_global_hash = hash_new(1024); | 402 | bat_priv->tt_global_hash = hash_new(1024); |
403 | 403 | ||
404 | if (!bat_priv->hna_global_hash) | 404 | if (!bat_priv->tt_global_hash) |
405 | return 0; | 405 | return 0; |
406 | 406 | ||
407 | return 1; | 407 | return 1; |
408 | } | 408 | } |
409 | 409 | ||
410 | void hna_global_add_orig(struct bat_priv *bat_priv, | 410 | void tt_global_add_orig(struct bat_priv *bat_priv, |
411 | struct orig_node *orig_node, | 411 | struct orig_node *orig_node, |
412 | unsigned char *hna_buff, int hna_buff_len) | 412 | unsigned char *tt_buff, int tt_buff_len) |
413 | { | 413 | { |
414 | struct hna_global_entry *hna_global_entry; | 414 | struct tt_global_entry *tt_global_entry; |
415 | struct hna_local_entry *hna_local_entry; | 415 | struct tt_local_entry *tt_local_entry; |
416 | int hna_buff_count = 0; | 416 | int tt_buff_count = 0; |
417 | unsigned char *hna_ptr; | 417 | unsigned char *tt_ptr; |
418 | 418 | ||
419 | while ((hna_buff_count + 1) * ETH_ALEN <= hna_buff_len) { | 419 | while ((tt_buff_count + 1) * ETH_ALEN <= tt_buff_len) { |
420 | spin_lock_bh(&bat_priv->hna_ghash_lock); | 420 | spin_lock_bh(&bat_priv->tt_ghash_lock); |
421 | 421 | ||
422 | hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN); | 422 | tt_ptr = tt_buff + (tt_buff_count * ETH_ALEN); |
423 | hna_global_entry = hna_global_hash_find(bat_priv, hna_ptr); | 423 | tt_global_entry = tt_global_hash_find(bat_priv, tt_ptr); |
424 | 424 | ||
425 | if (!hna_global_entry) { | 425 | if (!tt_global_entry) { |
426 | spin_unlock_bh(&bat_priv->hna_ghash_lock); | 426 | spin_unlock_bh(&bat_priv->tt_ghash_lock); |
427 | 427 | ||
428 | hna_global_entry = | 428 | tt_global_entry = |
429 | kmalloc(sizeof(struct hna_global_entry), | 429 | kmalloc(sizeof(struct tt_global_entry), |
430 | GFP_ATOMIC); | 430 | GFP_ATOMIC); |
431 | 431 | ||
432 | if (!hna_global_entry) | 432 | if (!tt_global_entry) |
433 | break; | 433 | break; |
434 | 434 | ||
435 | memcpy(hna_global_entry->addr, hna_ptr, ETH_ALEN); | 435 | memcpy(tt_global_entry->addr, tt_ptr, ETH_ALEN); |
436 | 436 | ||
437 | bat_dbg(DBG_ROUTES, bat_priv, | 437 | bat_dbg(DBG_ROUTES, bat_priv, |
438 | "Creating new global hna entry: " | 438 | "Creating new global tt entry: " |
439 | "%pM (via %pM)\n", | 439 | "%pM (via %pM)\n", |
440 | hna_global_entry->addr, orig_node->orig); | 440 | tt_global_entry->addr, orig_node->orig); |
441 | 441 | ||
442 | spin_lock_bh(&bat_priv->hna_ghash_lock); | 442 | spin_lock_bh(&bat_priv->tt_ghash_lock); |
443 | hash_add(bat_priv->hna_global_hash, compare_ghna, | 443 | hash_add(bat_priv->tt_global_hash, compare_gtt, |
444 | choose_orig, hna_global_entry, | 444 | choose_orig, tt_global_entry, |
445 | &hna_global_entry->hash_entry); | 445 | &tt_global_entry->hash_entry); |
446 | 446 | ||
447 | } | 447 | } |
448 | 448 | ||
449 | hna_global_entry->orig_node = orig_node; | 449 | tt_global_entry->orig_node = orig_node; |
450 | spin_unlock_bh(&bat_priv->hna_ghash_lock); | 450 | spin_unlock_bh(&bat_priv->tt_ghash_lock); |
451 | 451 | ||
452 | /* remove address from local hash if present */ | 452 | /* remove address from local hash if present */ |
453 | spin_lock_bh(&bat_priv->hna_lhash_lock); | 453 | spin_lock_bh(&bat_priv->tt_lhash_lock); |
454 | 454 | ||
455 | hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN); | 455 | tt_ptr = tt_buff + (tt_buff_count * ETH_ALEN); |
456 | hna_local_entry = hna_local_hash_find(bat_priv, hna_ptr); | 456 | tt_local_entry = tt_local_hash_find(bat_priv, tt_ptr); |
457 | 457 | ||
458 | if (hna_local_entry) | 458 | if (tt_local_entry) |
459 | hna_local_del(bat_priv, hna_local_entry, | 459 | tt_local_del(bat_priv, tt_local_entry, |
460 | "global hna received"); | 460 | "global tt received"); |
461 | 461 | ||
462 | spin_unlock_bh(&bat_priv->hna_lhash_lock); | 462 | spin_unlock_bh(&bat_priv->tt_lhash_lock); |
463 | 463 | ||
464 | hna_buff_count++; | 464 | tt_buff_count++; |
465 | } | 465 | } |
466 | 466 | ||
467 | /* initialize, and overwrite if malloc succeeds */ | 467 | /* initialize, and overwrite if malloc succeeds */ |
468 | orig_node->hna_buff = NULL; | 468 | orig_node->tt_buff = NULL; |
469 | orig_node->hna_buff_len = 0; | 469 | orig_node->tt_buff_len = 0; |
470 | 470 | ||
471 | if (hna_buff_len > 0) { | 471 | if (tt_buff_len > 0) { |
472 | orig_node->hna_buff = kmalloc(hna_buff_len, GFP_ATOMIC); | 472 | orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC); |
473 | if (orig_node->hna_buff) { | 473 | if (orig_node->tt_buff) { |
474 | memcpy(orig_node->hna_buff, hna_buff, hna_buff_len); | 474 | memcpy(orig_node->tt_buff, tt_buff, tt_buff_len); |
475 | orig_node->hna_buff_len = hna_buff_len; | 475 | orig_node->tt_buff_len = tt_buff_len; |
476 | } | 476 | } |
477 | } | 477 | } |
478 | } | 478 | } |
479 | 479 | ||
480 | int hna_global_seq_print_text(struct seq_file *seq, void *offset) | 480 | int tt_global_seq_print_text(struct seq_file *seq, void *offset) |
481 | { | 481 | { |
482 | struct net_device *net_dev = (struct net_device *)seq->private; | 482 | struct net_device *net_dev = (struct net_device *)seq->private; |
483 | struct bat_priv *bat_priv = netdev_priv(net_dev); | 483 | struct bat_priv *bat_priv = netdev_priv(net_dev); |
484 | struct hashtable_t *hash = bat_priv->hna_global_hash; | 484 | struct hashtable_t *hash = bat_priv->tt_global_hash; |
485 | struct hna_global_entry *hna_global_entry; | 485 | struct tt_global_entry *tt_global_entry; |
486 | struct hard_iface *primary_if; | 486 | struct hard_iface *primary_if; |
487 | struct hlist_node *node; | 487 | struct hlist_node *node; |
488 | struct hlist_head *head; | 488 | struct hlist_head *head; |
@@ -505,10 +505,11 @@ int hna_global_seq_print_text(struct seq_file *seq, void *offset) | |||
505 | goto out; | 505 | goto out; |
506 | } | 506 | } |
507 | 507 | ||
508 | seq_printf(seq, "Globally announced HNAs received via the mesh %s\n", | 508 | seq_printf(seq, |
509 | "Globally announced TT entries received via the mesh %s\n", | ||
509 | net_dev->name); | 510 | net_dev->name); |
510 | 511 | ||
511 | spin_lock_bh(&bat_priv->hna_ghash_lock); | 512 | spin_lock_bh(&bat_priv->tt_ghash_lock); |
512 | 513 | ||
513 | buf_size = 1; | 514 | buf_size = 1; |
514 | /* Estimate length for: " * xx:xx:xx:xx:xx:xx via xx:xx:xx:xx:xx:xx\n"*/ | 515 | /* Estimate length for: " * xx:xx:xx:xx:xx:xx via xx:xx:xx:xx:xx:xx\n"*/ |
@@ -523,7 +524,7 @@ int hna_global_seq_print_text(struct seq_file *seq, void *offset) | |||
523 | 524 | ||
524 | buff = kmalloc(buf_size, GFP_ATOMIC); | 525 | buff = kmalloc(buf_size, GFP_ATOMIC); |
525 | if (!buff) { | 526 | if (!buff) { |
526 | spin_unlock_bh(&bat_priv->hna_ghash_lock); | 527 | spin_unlock_bh(&bat_priv->tt_ghash_lock); |
527 | ret = -ENOMEM; | 528 | ret = -ENOMEM; |
528 | goto out; | 529 | goto out; |
529 | } | 530 | } |
@@ -534,17 +535,17 @@ int hna_global_seq_print_text(struct seq_file *seq, void *offset) | |||
534 | head = &hash->table[i]; | 535 | head = &hash->table[i]; |
535 | 536 | ||
536 | rcu_read_lock(); | 537 | rcu_read_lock(); |
537 | hlist_for_each_entry_rcu(hna_global_entry, node, | 538 | hlist_for_each_entry_rcu(tt_global_entry, node, |
538 | head, hash_entry) { | 539 | head, hash_entry) { |
539 | pos += snprintf(buff + pos, 44, | 540 | pos += snprintf(buff + pos, 44, |
540 | " * %pM via %pM\n", | 541 | " * %pM via %pM\n", |
541 | hna_global_entry->addr, | 542 | tt_global_entry->addr, |
542 | hna_global_entry->orig_node->orig); | 543 | tt_global_entry->orig_node->orig); |
543 | } | 544 | } |
544 | rcu_read_unlock(); | 545 | rcu_read_unlock(); |
545 | } | 546 | } |
546 | 547 | ||
547 | spin_unlock_bh(&bat_priv->hna_ghash_lock); | 548 | spin_unlock_bh(&bat_priv->tt_ghash_lock); |
548 | 549 | ||
549 | seq_printf(seq, "%s", buff); | 550 | seq_printf(seq, "%s", buff); |
550 | kfree(buff); | 551 | kfree(buff); |
@@ -554,84 +555,84 @@ out: | |||
554 | return ret; | 555 | return ret; |
555 | } | 556 | } |
556 | 557 | ||
557 | static void _hna_global_del_orig(struct bat_priv *bat_priv, | 558 | static void _tt_global_del_orig(struct bat_priv *bat_priv, |
558 | struct hna_global_entry *hna_global_entry, | 559 | struct tt_global_entry *tt_global_entry, |
559 | char *message) | 560 | char *message) |
560 | { | 561 | { |
561 | bat_dbg(DBG_ROUTES, bat_priv, | 562 | bat_dbg(DBG_ROUTES, bat_priv, |
562 | "Deleting global hna entry %pM (via %pM): %s\n", | 563 | "Deleting global tt entry %pM (via %pM): %s\n", |
563 | hna_global_entry->addr, hna_global_entry->orig_node->orig, | 564 | tt_global_entry->addr, tt_global_entry->orig_node->orig, |
564 | message); | 565 | message); |
565 | 566 | ||
566 | hash_remove(bat_priv->hna_global_hash, compare_ghna, choose_orig, | 567 | hash_remove(bat_priv->tt_global_hash, compare_gtt, choose_orig, |
567 | hna_global_entry->addr); | 568 | tt_global_entry->addr); |
568 | kfree(hna_global_entry); | 569 | kfree(tt_global_entry); |
569 | } | 570 | } |
570 | 571 | ||
571 | void hna_global_del_orig(struct bat_priv *bat_priv, | 572 | void tt_global_del_orig(struct bat_priv *bat_priv, |
572 | struct orig_node *orig_node, char *message) | 573 | struct orig_node *orig_node, char *message) |
573 | { | 574 | { |
574 | struct hna_global_entry *hna_global_entry; | 575 | struct tt_global_entry *tt_global_entry; |
575 | int hna_buff_count = 0; | 576 | int tt_buff_count = 0; |
576 | unsigned char *hna_ptr; | 577 | unsigned char *tt_ptr; |
577 | 578 | ||
578 | if (orig_node->hna_buff_len == 0) | 579 | if (orig_node->tt_buff_len == 0) |
579 | return; | 580 | return; |
580 | 581 | ||
581 | spin_lock_bh(&bat_priv->hna_ghash_lock); | 582 | spin_lock_bh(&bat_priv->tt_ghash_lock); |
582 | 583 | ||
583 | while ((hna_buff_count + 1) * ETH_ALEN <= orig_node->hna_buff_len) { | 584 | while ((tt_buff_count + 1) * ETH_ALEN <= orig_node->tt_buff_len) { |
584 | hna_ptr = orig_node->hna_buff + (hna_buff_count * ETH_ALEN); | 585 | tt_ptr = orig_node->tt_buff + (tt_buff_count * ETH_ALEN); |
585 | hna_global_entry = hna_global_hash_find(bat_priv, hna_ptr); | 586 | tt_global_entry = tt_global_hash_find(bat_priv, tt_ptr); |
586 | 587 | ||
587 | if ((hna_global_entry) && | 588 | if ((tt_global_entry) && |
588 | (hna_global_entry->orig_node == orig_node)) | 589 | (tt_global_entry->orig_node == orig_node)) |
589 | _hna_global_del_orig(bat_priv, hna_global_entry, | 590 | _tt_global_del_orig(bat_priv, tt_global_entry, |
590 | message); | 591 | message); |
591 | 592 | ||
592 | hna_buff_count++; | 593 | tt_buff_count++; |
593 | } | 594 | } |
594 | 595 | ||
595 | spin_unlock_bh(&bat_priv->hna_ghash_lock); | 596 | spin_unlock_bh(&bat_priv->tt_ghash_lock); |
596 | 597 | ||
597 | orig_node->hna_buff_len = 0; | 598 | orig_node->tt_buff_len = 0; |
598 | kfree(orig_node->hna_buff); | 599 | kfree(orig_node->tt_buff); |
599 | orig_node->hna_buff = NULL; | 600 | orig_node->tt_buff = NULL; |
600 | } | 601 | } |
601 | 602 | ||
602 | static void hna_global_del(struct hlist_node *node, void *arg) | 603 | static void tt_global_del(struct hlist_node *node, void *arg) |
603 | { | 604 | { |
604 | void *data = container_of(node, struct hna_global_entry, hash_entry); | 605 | void *data = container_of(node, struct tt_global_entry, hash_entry); |
605 | 606 | ||
606 | kfree(data); | 607 | kfree(data); |
607 | } | 608 | } |
608 | 609 | ||
609 | void hna_global_free(struct bat_priv *bat_priv) | 610 | void tt_global_free(struct bat_priv *bat_priv) |
610 | { | 611 | { |
611 | if (!bat_priv->hna_global_hash) | 612 | if (!bat_priv->tt_global_hash) |
612 | return; | 613 | return; |
613 | 614 | ||
614 | hash_delete(bat_priv->hna_global_hash, hna_global_del, NULL); | 615 | hash_delete(bat_priv->tt_global_hash, tt_global_del, NULL); |
615 | bat_priv->hna_global_hash = NULL; | 616 | bat_priv->tt_global_hash = NULL; |
616 | } | 617 | } |
617 | 618 | ||
618 | struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr) | 619 | struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr) |
619 | { | 620 | { |
620 | struct hna_global_entry *hna_global_entry; | 621 | struct tt_global_entry *tt_global_entry; |
621 | struct orig_node *orig_node = NULL; | 622 | struct orig_node *orig_node = NULL; |
622 | 623 | ||
623 | spin_lock_bh(&bat_priv->hna_ghash_lock); | 624 | spin_lock_bh(&bat_priv->tt_ghash_lock); |
624 | hna_global_entry = hna_global_hash_find(bat_priv, addr); | 625 | tt_global_entry = tt_global_hash_find(bat_priv, addr); |
625 | 626 | ||
626 | if (!hna_global_entry) | 627 | if (!tt_global_entry) |
627 | goto out; | 628 | goto out; |
628 | 629 | ||
629 | if (!atomic_inc_not_zero(&hna_global_entry->orig_node->refcount)) | 630 | if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount)) |
630 | goto out; | 631 | goto out; |
631 | 632 | ||
632 | orig_node = hna_global_entry->orig_node; | 633 | orig_node = tt_global_entry->orig_node; |
633 | 634 | ||
634 | out: | 635 | out: |
635 | spin_unlock_bh(&bat_priv->hna_ghash_lock); | 636 | spin_unlock_bh(&bat_priv->tt_ghash_lock); |
636 | return orig_node; | 637 | return orig_node; |
637 | } | 638 | } |
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h index f19931ca1457..46152c38cc95 100644 --- a/net/batman-adv/translation-table.h +++ b/net/batman-adv/translation-table.h | |||
@@ -22,22 +22,22 @@ | |||
22 | #ifndef _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ | 22 | #ifndef _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ |
23 | #define _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ | 23 | #define _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ |
24 | 24 | ||
25 | int hna_local_init(struct bat_priv *bat_priv); | 25 | int tt_local_init(struct bat_priv *bat_priv); |
26 | void hna_local_add(struct net_device *soft_iface, uint8_t *addr); | 26 | void tt_local_add(struct net_device *soft_iface, uint8_t *addr); |
27 | void hna_local_remove(struct bat_priv *bat_priv, | 27 | void tt_local_remove(struct bat_priv *bat_priv, |
28 | uint8_t *addr, char *message); | 28 | uint8_t *addr, char *message); |
29 | int hna_local_fill_buffer(struct bat_priv *bat_priv, | 29 | int tt_local_fill_buffer(struct bat_priv *bat_priv, |
30 | unsigned char *buff, int buff_len); | 30 | unsigned char *buff, int buff_len); |
31 | int hna_local_seq_print_text(struct seq_file *seq, void *offset); | 31 | int tt_local_seq_print_text(struct seq_file *seq, void *offset); |
32 | void hna_local_free(struct bat_priv *bat_priv); | 32 | void tt_local_free(struct bat_priv *bat_priv); |
33 | int hna_global_init(struct bat_priv *bat_priv); | 33 | int tt_global_init(struct bat_priv *bat_priv); |
34 | void hna_global_add_orig(struct bat_priv *bat_priv, | 34 | void tt_global_add_orig(struct bat_priv *bat_priv, |
35 | struct orig_node *orig_node, | 35 | struct orig_node *orig_node, |
36 | unsigned char *hna_buff, int hna_buff_len); | 36 | unsigned char *tt_buff, int tt_buff_len); |
37 | int hna_global_seq_print_text(struct seq_file *seq, void *offset); | 37 | int tt_global_seq_print_text(struct seq_file *seq, void *offset); |
38 | void hna_global_del_orig(struct bat_priv *bat_priv, | 38 | void tt_global_del_orig(struct bat_priv *bat_priv, |
39 | struct orig_node *orig_node, char *message); | 39 | struct orig_node *orig_node, char *message); |
40 | void hna_global_free(struct bat_priv *bat_priv); | 40 | void tt_global_free(struct bat_priv *bat_priv); |
41 | struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr); | 41 | struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr); |
42 | 42 | ||
43 | #endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */ | 43 | #endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */ |
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index 947bafc6431a..fab70e8b16ee 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h | |||
@@ -75,8 +75,8 @@ struct orig_node { | |||
75 | unsigned long batman_seqno_reset; | 75 | unsigned long batman_seqno_reset; |
76 | uint8_t gw_flags; | 76 | uint8_t gw_flags; |
77 | uint8_t flags; | 77 | uint8_t flags; |
78 | unsigned char *hna_buff; | 78 | unsigned char *tt_buff; |
79 | int16_t hna_buff_len; | 79 | int16_t tt_buff_len; |
80 | uint32_t last_real_seqno; | 80 | uint32_t last_real_seqno; |
81 | uint8_t last_ttl; | 81 | uint8_t last_ttl; |
82 | unsigned long bcast_bits[NUM_WORDS]; | 82 | unsigned long bcast_bits[NUM_WORDS]; |
@@ -89,11 +89,11 @@ struct orig_node { | |||
89 | struct hlist_node hash_entry; | 89 | struct hlist_node hash_entry; |
90 | struct bat_priv *bat_priv; | 90 | struct bat_priv *bat_priv; |
91 | unsigned long last_frag_packet; | 91 | unsigned long last_frag_packet; |
92 | spinlock_t ogm_cnt_lock; /* protects: bcast_own, bcast_own_sum, | 92 | /* ogm_cnt_lock protects: bcast_own, bcast_own_sum, |
93 | * neigh_node->real_bits, | 93 | * neigh_node->real_bits, neigh_node->real_packet_count */ |
94 | * neigh_node->real_packet_count */ | 94 | spinlock_t ogm_cnt_lock; |
95 | spinlock_t bcast_seqno_lock; /* protects bcast_bits, | 95 | /* bcast_seqno_lock protects bcast_bits, last_bcast_seqno */ |
96 | * last_bcast_seqno */ | 96 | spinlock_t bcast_seqno_lock; |
97 | atomic_t bond_candidates; | 97 | atomic_t bond_candidates; |
98 | struct list_head bond_list; | 98 | struct list_head bond_list; |
99 | }; | 99 | }; |
@@ -146,30 +146,30 @@ struct bat_priv { | |||
146 | atomic_t bcast_queue_left; | 146 | atomic_t bcast_queue_left; |
147 | atomic_t batman_queue_left; | 147 | atomic_t batman_queue_left; |
148 | char num_ifaces; | 148 | char num_ifaces; |
149 | struct hlist_head softif_neigh_list; | ||
150 | struct softif_neigh __rcu *softif_neigh; | ||
151 | struct debug_log *debug_log; | 149 | struct debug_log *debug_log; |
152 | struct kobject *mesh_obj; | 150 | struct kobject *mesh_obj; |
153 | struct dentry *debug_dir; | 151 | struct dentry *debug_dir; |
154 | struct hlist_head forw_bat_list; | 152 | struct hlist_head forw_bat_list; |
155 | struct hlist_head forw_bcast_list; | 153 | struct hlist_head forw_bcast_list; |
156 | struct hlist_head gw_list; | 154 | struct hlist_head gw_list; |
155 | struct hlist_head softif_neigh_vids; | ||
157 | struct list_head vis_send_list; | 156 | struct list_head vis_send_list; |
158 | struct hashtable_t *orig_hash; | 157 | struct hashtable_t *orig_hash; |
159 | struct hashtable_t *hna_local_hash; | 158 | struct hashtable_t *tt_local_hash; |
160 | struct hashtable_t *hna_global_hash; | 159 | struct hashtable_t *tt_global_hash; |
161 | struct hashtable_t *vis_hash; | 160 | struct hashtable_t *vis_hash; |
162 | spinlock_t forw_bat_list_lock; /* protects forw_bat_list */ | 161 | spinlock_t forw_bat_list_lock; /* protects forw_bat_list */ |
163 | spinlock_t forw_bcast_list_lock; /* protects */ | 162 | spinlock_t forw_bcast_list_lock; /* protects */ |
164 | spinlock_t hna_lhash_lock; /* protects hna_local_hash */ | 163 | spinlock_t tt_lhash_lock; /* protects tt_local_hash */ |
165 | spinlock_t hna_ghash_lock; /* protects hna_global_hash */ | 164 | spinlock_t tt_ghash_lock; /* protects tt_global_hash */ |
166 | spinlock_t gw_list_lock; /* protects gw_list and curr_gw */ | 165 | spinlock_t gw_list_lock; /* protects gw_list and curr_gw */ |
167 | spinlock_t vis_hash_lock; /* protects vis_hash */ | 166 | spinlock_t vis_hash_lock; /* protects vis_hash */ |
168 | spinlock_t vis_list_lock; /* protects vis_info::recv_list */ | 167 | spinlock_t vis_list_lock; /* protects vis_info::recv_list */ |
169 | spinlock_t softif_neigh_lock; /* protects soft-interface neigh list */ | 168 | spinlock_t softif_neigh_lock; /* protects soft-interface neigh list */ |
170 | int16_t num_local_hna; | 169 | spinlock_t softif_neigh_vid_lock; /* protects soft-interface vid list */ |
171 | atomic_t hna_local_changed; | 170 | int16_t num_local_tt; |
172 | struct delayed_work hna_work; | 171 | atomic_t tt_local_changed; |
172 | struct delayed_work tt_work; | ||
173 | struct delayed_work orig_work; | 173 | struct delayed_work orig_work; |
174 | struct delayed_work vis_work; | 174 | struct delayed_work vis_work; |
175 | struct gw_node __rcu *curr_gw; /* rcu protected pointer */ | 175 | struct gw_node __rcu *curr_gw; /* rcu protected pointer */ |
@@ -192,14 +192,14 @@ struct socket_packet { | |||
192 | struct icmp_packet_rr icmp_packet; | 192 | struct icmp_packet_rr icmp_packet; |
193 | }; | 193 | }; |
194 | 194 | ||
195 | struct hna_local_entry { | 195 | struct tt_local_entry { |
196 | uint8_t addr[ETH_ALEN]; | 196 | uint8_t addr[ETH_ALEN]; |
197 | unsigned long last_seen; | 197 | unsigned long last_seen; |
198 | char never_purge; | 198 | char never_purge; |
199 | struct hlist_node hash_entry; | 199 | struct hlist_node hash_entry; |
200 | }; | 200 | }; |
201 | 201 | ||
202 | struct hna_global_entry { | 202 | struct tt_global_entry { |
203 | uint8_t addr[ETH_ALEN]; | 203 | uint8_t addr[ETH_ALEN]; |
204 | struct orig_node *orig_node; | 204 | struct orig_node *orig_node; |
205 | struct hlist_node hash_entry; | 205 | struct hlist_node hash_entry; |
@@ -262,7 +262,7 @@ struct vis_info { | |||
262 | struct vis_info_entry { | 262 | struct vis_info_entry { |
263 | uint8_t src[ETH_ALEN]; | 263 | uint8_t src[ETH_ALEN]; |
264 | uint8_t dest[ETH_ALEN]; | 264 | uint8_t dest[ETH_ALEN]; |
265 | uint8_t quality; /* quality = 0 means HNA */ | 265 | uint8_t quality; /* quality = 0 client */ |
266 | } __packed; | 266 | } __packed; |
267 | 267 | ||
268 | struct recvlist_node { | 268 | struct recvlist_node { |
@@ -270,11 +270,20 @@ struct recvlist_node { | |||
270 | uint8_t mac[ETH_ALEN]; | 270 | uint8_t mac[ETH_ALEN]; |
271 | }; | 271 | }; |
272 | 272 | ||
273 | struct softif_neigh_vid { | ||
274 | struct hlist_node list; | ||
275 | struct bat_priv *bat_priv; | ||
276 | short vid; | ||
277 | atomic_t refcount; | ||
278 | struct softif_neigh __rcu *softif_neigh; | ||
279 | struct rcu_head rcu; | ||
280 | struct hlist_head softif_neigh_list; | ||
281 | }; | ||
282 | |||
273 | struct softif_neigh { | 283 | struct softif_neigh { |
274 | struct hlist_node list; | 284 | struct hlist_node list; |
275 | uint8_t addr[ETH_ALEN]; | 285 | uint8_t addr[ETH_ALEN]; |
276 | unsigned long last_seen; | 286 | unsigned long last_seen; |
277 | short vid; | ||
278 | atomic_t refcount; | 287 | atomic_t refcount; |
279 | struct rcu_head rcu; | 288 | struct rcu_head rcu; |
280 | }; | 289 | }; |
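The new softif_neigh_vid container moves the VLAN id and its neighbour list out of struct softif_neigh, so each VLAN carries its own RCU-protected neighbour list hanging off bat_priv->softif_neigh_vids. A lookup sketch under stated assumptions (the helper name is illustrative; the four-argument hlist_for_each_entry_rcu() matches the list macros of this kernel generation):

    static struct softif_neigh_vid *
    softif_neigh_vid_lookup(struct bat_priv *bat_priv, short vid)
    {
            struct softif_neigh_vid *softif_neigh_vid;
            struct hlist_node *node;

            rcu_read_lock();
            hlist_for_each_entry_rcu(softif_neigh_vid, node,
                                     &bat_priv->softif_neigh_vids, list) {
                    if (softif_neigh_vid->vid != vid)
                            continue;
                    /* skip entries that are already being freed */
                    if (!atomic_inc_not_zero(&softif_neigh_vid->refcount))
                            continue;
                    rcu_read_unlock();
                    return softif_neigh_vid;
            }
            rcu_read_unlock();
            return NULL;
    }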
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c index b46cbf1507e4..19c3daf34ac6 100644 --- a/net/batman-adv/unicast.c +++ b/net/batman-adv/unicast.c | |||
@@ -300,7 +300,7 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv) | |||
300 | goto find_router; | 300 | goto find_router; |
301 | } | 301 | } |
302 | 302 | ||
303 | /* check for hna host - increases orig_node refcount */ | 303 | /* check for tt host - increases orig_node refcount */ |
304 | orig_node = transtable_search(bat_priv, ethhdr->h_dest); | 304 | orig_node = transtable_search(bat_priv, ethhdr->h_dest); |
305 | 305 | ||
306 | find_router: | 306 | find_router: |
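The comment above notes that transtable_search() returns its orig_node with an elevated refcount. A hedged sketch of the caller obligation this implies; orig_node_free_ref() follows the batman-adv *_free_ref convention and is assumed here, not shown in this hunk:

    /* look up which originator announced the client MAC ... */
    orig_node = transtable_search(bat_priv, ethhdr->h_dest);
    if (orig_node) {
            /* ... pick a router towards orig_node, build the unicast header ... */
            orig_node_free_ref(orig_node);  /* drop the search reference */
    }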
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c index c8f571d3b5d4..c39f20cc1ba6 100644 --- a/net/batman-adv/vis.c +++ b/net/batman-adv/vis.c | |||
@@ -194,7 +194,7 @@ static ssize_t vis_data_read_entry(char *buff, struct vis_info_entry *entry, | |||
194 | { | 194 | { |
195 | /* maximal length: max(4+17+2, 3+17+1+3+2) == 26 */ | 195 | /* maximal length: max(4+17+2, 3+17+1+3+2) == 26 */ |
196 | if (primary && entry->quality == 0) | 196 | if (primary && entry->quality == 0) |
197 | return sprintf(buff, "HNA %pM, ", entry->dest); | 197 | return sprintf(buff, "TT %pM, ", entry->dest); |
198 | else if (compare_eth(entry->src, src)) | 198 | else if (compare_eth(entry->src, src)) |
199 | return sprintf(buff, "TQ %pM %d, ", entry->dest, | 199 | return sprintf(buff, "TQ %pM %d, ", entry->dest, |
200 | entry->quality); | 200 | entry->quality); |
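With the rename, a local client appears in the vis output under the "TT" prefix, while links keep the "TQ" prefix. Illustrative fragments produced by the two sprintf() calls above (addresses made up); both stay within the 26-byte bound stated in the comment:

    TT fe:fe:fe:fe:fe:01,
    TQ fe:fe:fe:fe:fe:02 245,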
@@ -622,7 +622,7 @@ static int generate_vis_packet(struct bat_priv *bat_priv) | |||
622 | struct vis_info *info = (struct vis_info *)bat_priv->my_vis_info; | 622 | struct vis_info *info = (struct vis_info *)bat_priv->my_vis_info; |
623 | struct vis_packet *packet = (struct vis_packet *)info->skb_packet->data; | 623 | struct vis_packet *packet = (struct vis_packet *)info->skb_packet->data; |
624 | struct vis_info_entry *entry; | 624 | struct vis_info_entry *entry; |
625 | struct hna_local_entry *hna_local_entry; | 625 | struct tt_local_entry *tt_local_entry; |
626 | int best_tq = -1, i; | 626 | int best_tq = -1, i; |
627 | 627 | ||
628 | info->first_seen = jiffies; | 628 | info->first_seen = jiffies; |
@@ -678,29 +678,29 @@ next: | |||
678 | rcu_read_unlock(); | 678 | rcu_read_unlock(); |
679 | } | 679 | } |
680 | 680 | ||
681 | hash = bat_priv->hna_local_hash; | 681 | hash = bat_priv->tt_local_hash; |
682 | 682 | ||
683 | spin_lock_bh(&bat_priv->hna_lhash_lock); | 683 | spin_lock_bh(&bat_priv->tt_lhash_lock); |
684 | for (i = 0; i < hash->size; i++) { | 684 | for (i = 0; i < hash->size; i++) { |
685 | head = &hash->table[i]; | 685 | head = &hash->table[i]; |
686 | 686 | ||
687 | hlist_for_each_entry(hna_local_entry, node, head, hash_entry) { | 687 | hlist_for_each_entry(tt_local_entry, node, head, hash_entry) { |
688 | entry = (struct vis_info_entry *) | 688 | entry = (struct vis_info_entry *) |
689 | skb_put(info->skb_packet, | 689 | skb_put(info->skb_packet, |
690 | sizeof(*entry)); | 690 | sizeof(*entry)); |
691 | memset(entry->src, 0, ETH_ALEN); | 691 | memset(entry->src, 0, ETH_ALEN); |
692 | memcpy(entry->dest, hna_local_entry->addr, ETH_ALEN); | 692 | memcpy(entry->dest, tt_local_entry->addr, ETH_ALEN); |
693 | entry->quality = 0; /* 0 means HNA */ | 693 | entry->quality = 0; /* 0 means TT */ |
694 | packet->entries++; | 694 | packet->entries++; |
695 | 695 | ||
696 | if (vis_packet_full(info)) { | 696 | if (vis_packet_full(info)) { |
697 | spin_unlock_bh(&bat_priv->hna_lhash_lock); | 697 | spin_unlock_bh(&bat_priv->tt_lhash_lock); |
698 | return 0; | 698 | return 0; |
699 | } | 699 | } |
700 | } | 700 | } |
701 | } | 701 | } |
702 | 702 | ||
703 | spin_unlock_bh(&bat_priv->hna_lhash_lock); | 703 | spin_unlock_bh(&bat_priv->tt_lhash_lock); |
704 | return 0; | 704 | return 0; |
705 | 705 | ||
706 | unlock: | 706 | unlock: |
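On the receiving side the same convention applies: an entry with quality 0 names a client behind the sender, anything else a link with that TQ. A tiny interpretation sketch (helper name illustrative), mirroring vis_data_read_entry() above:

    static bool vis_entry_is_client(const struct vis_info_entry *entry)
    {
            /* quality == 0: 'dest' is a TT client announced by the sender;
             * otherwise 'dest' is a neighbour reached with TQ == quality. */
            return entry->quality == 0;
    }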