author     David S. Miller <davem@davemloft.net>	2011-03-07 03:37:13 -0500
committer  David S. Miller <davem@davemloft.net>	2011-03-07 03:37:13 -0500
commit     b8cec4a415e807a2f8679efa89558a040a7003de (patch)
tree       1a5fc7c31c4f8b55ea850599bb7309871165f48e
parent     5e2b61f78411be25f0b84f97d5b5d312f184dfd1 (diff)
parent     e44d8fe2b5c27ecc230f886d4cc49fcbd86f87a0 (diff)

Merge branch 'batman-adv/next' of git://git.open-mesh.org/ecsv/linux-merge
-rw-r--r--  net/batman-adv/aggregation.c        |   8
-rw-r--r--  net/batman-adv/aggregation.h        |   4
-rw-r--r--  net/batman-adv/bat_sysfs.c          |  51
-rw-r--r--  net/batman-adv/gateway_client.c     | 140
-rw-r--r--  net/batman-adv/hard-interface.c     | 407
-rw-r--r--  net/batman-adv/hard-interface.h     |  15
-rw-r--r--  net/batman-adv/hash.c               |  26
-rw-r--r--  net/batman-adv/hash.h               | 112
-rw-r--r--  net/batman-adv/icmp_socket.c        |  40
-rw-r--r--  net/batman-adv/main.c               |  13
-rw-r--r--  net/batman-adv/main.h               |  12
-rw-r--r--  net/batman-adv/originator.c         | 252
-rw-r--r--  net/batman-adv/originator.h         |  50
-rw-r--r--  net/batman-adv/routing.c            | 983
-rw-r--r--  net/batman-adv/routing.h            |  25
-rw-r--r--  net/batman-adv/send.c               | 103
-rw-r--r--  net/batman-adv/send.h               |   8
-rw-r--r--  net/batman-adv/soft-interface.c     |  74
-rw-r--r--  net/batman-adv/soft-interface.h     |   3
-rw-r--r--  net/batman-adv/translation-table.c  | 205
-rw-r--r--  net/batman-adv/types.h              |  48
-rw-r--r--  net/batman-adv/unicast.c            |  93
-rw-r--r--  net/batman-adv/unicast.h            |   2
-rw-r--r--  net/batman-adv/vis.c                | 192

24 files changed, 1634 insertions, 1232 deletions
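The bulk of this merge converts batman-adv's gateway and interface objects from struct kref to a bare atomic_t reference count with RCU-deferred freeing, alongside the batman_if -> hard_iface rename. A minimal sketch of the pattern the hunks below keep repeating (illustrative names, not the exact batman-adv definitions):

	#include <linux/atomic.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct obj {
		atomic_t refcount;
		struct rcu_head rcu;
	};

	static void obj_free_rcu(struct rcu_head *rcu)
	{
		/* runs after a grace period: no RCU reader still sees the object */
		kfree(container_of(rcu, struct obj, rcu));
	}

	static void obj_free_ref(struct obj *obj)
	{
		/* the final put schedules the kfree() instead of freeing inline */
		if (atomic_dec_and_test(&obj->refcount))
			call_rcu(&obj->rcu, obj_free_rcu);
	}

	static struct obj *obj_get(struct obj *obj)
	{
		/* succeeds only while the count has not yet dropped to zero */
		return atomic_inc_not_zero(&obj->refcount) ? obj : NULL;
	}

The atomic_inc_not_zero() guard is what makes lockless lookups under rcu_read_lock() safe: a reader can only take a reference while the object is still live.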
diff --git a/net/batman-adv/aggregation.c b/net/batman-adv/aggregation.c
index 1997725a243b..af45d6b2031f 100644
--- a/net/batman-adv/aggregation.c
+++ b/net/batman-adv/aggregation.c
@@ -35,7 +35,7 @@ static bool can_aggregate_with(struct batman_packet *new_batman_packet,
 			       int packet_len,
 			       unsigned long send_time,
 			       bool directlink,
-			       struct batman_if *if_incoming,
+			       struct hard_iface *if_incoming,
 			       struct forw_packet *forw_packet)
 {
 	struct batman_packet *batman_packet =
@@ -99,7 +99,7 @@ static bool can_aggregate_with(struct batman_packet *new_batman_packet,
 /* create a new aggregated packet and add this packet to it */
 static void new_aggregated_packet(unsigned char *packet_buff, int packet_len,
 				  unsigned long send_time, bool direct_link,
-				  struct batman_if *if_incoming,
+				  struct hard_iface *if_incoming,
 				  int own_packet)
 {
 	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
@@ -188,7 +188,7 @@ static void aggregate(struct forw_packet *forw_packet_aggr,
 
 void add_bat_packet_to_list(struct bat_priv *bat_priv,
 			    unsigned char *packet_buff, int packet_len,
-			    struct batman_if *if_incoming, char own_packet,
+			    struct hard_iface *if_incoming, char own_packet,
 			    unsigned long send_time)
 {
 	/**
@@ -247,7 +247,7 @@ void add_bat_packet_to_list(struct bat_priv *bat_priv,
 
 /* unpack the aggregated packets and process them one by one */
 void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff,
-			     int packet_len, struct batman_if *if_incoming)
+			     int packet_len, struct hard_iface *if_incoming)
 {
 	struct batman_packet *batman_packet;
 	int buff_pos = 0;
diff --git a/net/batman-adv/aggregation.h b/net/batman-adv/aggregation.h
index 6ce305b40017..062204289d1f 100644
--- a/net/batman-adv/aggregation.h
+++ b/net/batman-adv/aggregation.h
@@ -35,9 +35,9 @@ static inline int aggregated_packet(int buff_pos, int packet_len, int num_hna)
 
 void add_bat_packet_to_list(struct bat_priv *bat_priv,
 			    unsigned char *packet_buff, int packet_len,
-			    struct batman_if *if_incoming, char own_packet,
+			    struct hard_iface *if_incoming, char own_packet,
 			    unsigned long send_time);
 void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff,
-			     int packet_len, struct batman_if *if_incoming);
+			     int packet_len, struct hard_iface *if_incoming);
 
 #endif /* _NET_BATMAN_ADV_AGGREGATION_H_ */
diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c
index f7b93a0805fe..e449bf6353e0 100644
--- a/net/batman-adv/bat_sysfs.c
+++ b/net/batman-adv/bat_sysfs.c
@@ -441,16 +441,16 @@ static ssize_t show_mesh_iface(struct kobject *kobj, struct attribute *attr,
 			       char *buff)
 {
 	struct net_device *net_dev = kobj_to_netdev(kobj);
-	struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
+	struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
 	ssize_t length;
 
-	if (!batman_if)
+	if (!hard_iface)
 		return 0;
 
-	length = sprintf(buff, "%s\n", batman_if->if_status == IF_NOT_IN_USE ?
-			 "none" : batman_if->soft_iface->name);
+	length = sprintf(buff, "%s\n", hard_iface->if_status == IF_NOT_IN_USE ?
+			 "none" : hard_iface->soft_iface->name);
 
-	kref_put(&batman_if->refcount, hardif_free_ref);
+	hardif_free_ref(hard_iface);
 
 	return length;
 }
@@ -459,11 +459,11 @@ static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr,
 				char *buff, size_t count)
 {
 	struct net_device *net_dev = kobj_to_netdev(kobj);
-	struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
+	struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
 	int status_tmp = -1;
-	int ret;
+	int ret = count;
 
-	if (!batman_if)
+	if (!hard_iface)
 		return count;
 
 	if (buff[count - 1] == '\n')
@@ -472,7 +472,7 @@ static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr,
 	if (strlen(buff) >= IFNAMSIZ) {
 		pr_err("Invalid parameter for 'mesh_iface' setting received: "
 		       "interface name too long '%s'\n", buff);
-		kref_put(&batman_if->refcount, hardif_free_ref);
+		hardif_free_ref(hard_iface);
 		return -EINVAL;
 	}
 
@@ -481,30 +481,31 @@ static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr,
 	else
 		status_tmp = IF_I_WANT_YOU;
 
-	if ((batman_if->if_status == status_tmp) || ((batman_if->soft_iface) &&
-	    (strncmp(batman_if->soft_iface->name, buff, IFNAMSIZ) == 0))) {
-		kref_put(&batman_if->refcount, hardif_free_ref);
-		return count;
-	}
+	if (hard_iface->if_status == status_tmp)
+		goto out;
+
+	if ((hard_iface->soft_iface) &&
+	    (strncmp(hard_iface->soft_iface->name, buff, IFNAMSIZ) == 0))
+		goto out;
 
 	if (status_tmp == IF_NOT_IN_USE) {
 		rtnl_lock();
-		hardif_disable_interface(batman_if);
+		hardif_disable_interface(hard_iface);
 		rtnl_unlock();
-		kref_put(&batman_if->refcount, hardif_free_ref);
-		return count;
+		goto out;
 	}
 
 	/* if the interface already is in use */
-	if (batman_if->if_status != IF_NOT_IN_USE) {
+	if (hard_iface->if_status != IF_NOT_IN_USE) {
 		rtnl_lock();
-		hardif_disable_interface(batman_if);
+		hardif_disable_interface(hard_iface);
 		rtnl_unlock();
 	}
 
-	ret = hardif_enable_interface(batman_if, buff);
-	kref_put(&batman_if->refcount, hardif_free_ref);
+	ret = hardif_enable_interface(hard_iface, buff);
 
+out:
+	hardif_free_ref(hard_iface);
 	return ret;
 }
 
@@ -512,13 +513,13 @@ static ssize_t show_iface_status(struct kobject *kobj, struct attribute *attr,
 				 char *buff)
 {
 	struct net_device *net_dev = kobj_to_netdev(kobj);
-	struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
+	struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
 	ssize_t length;
 
-	if (!batman_if)
+	if (!hard_iface)
 		return 0;
 
-	switch (batman_if->if_status) {
+	switch (hard_iface->if_status) {
 	case IF_TO_BE_REMOVED:
 		length = sprintf(buff, "disabling\n");
 		break;
@@ -537,7 +538,7 @@ static ssize_t show_iface_status(struct kobject *kobj, struct attribute *attr,
 		break;
 	}
 
-	kref_put(&batman_if->refcount, hardif_free_ref);
+	hardif_free_ref(hard_iface);
 
 	return length;
 }
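hardif_get_by_netdev() now returns a counted reference, so every sysfs handler above releases it through hardif_free_ref() on a single exit path. A reduced sketch of the caller contract (error handling trimmed, not the verbatim handler):

	static ssize_t example_show(struct net_device *net_dev, char *buff)
	{
		struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
		ssize_t length;

		if (!hard_iface)
			return 0;

		length = sprintf(buff, "%s\n", hard_iface->net_dev->name);

		/* each successful lookup is paired with exactly one release */
		hardif_free_ref(hard_iface);
		return length;
	}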
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 429a013d2e0a..3cc43558cf9c 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -28,58 +28,75 @@
 #include <linux/udp.h>
 #include <linux/if_vlan.h>
 
-static void gw_node_free_ref(struct kref *refcount)
+static void gw_node_free_rcu(struct rcu_head *rcu)
 {
 	struct gw_node *gw_node;
 
-	gw_node = container_of(refcount, struct gw_node, refcount);
+	gw_node = container_of(rcu, struct gw_node, rcu);
 	kfree(gw_node);
 }
 
-static void gw_node_free_rcu(struct rcu_head *rcu)
+static void gw_node_free_ref(struct gw_node *gw_node)
 {
-	struct gw_node *gw_node;
-
-	gw_node = container_of(rcu, struct gw_node, rcu);
-	kref_put(&gw_node->refcount, gw_node_free_ref);
+	if (atomic_dec_and_test(&gw_node->refcount))
+		call_rcu(&gw_node->rcu, gw_node_free_rcu);
 }
 
 void *gw_get_selected(struct bat_priv *bat_priv)
 {
-	struct gw_node *curr_gateway_tmp = bat_priv->curr_gw;
+	struct gw_node *curr_gateway_tmp;
+	struct orig_node *orig_node = NULL;
 
+	rcu_read_lock();
+	curr_gateway_tmp = rcu_dereference(bat_priv->curr_gw);
 	if (!curr_gateway_tmp)
-		return NULL;
+		goto out;
+
+	orig_node = curr_gateway_tmp->orig_node;
+	if (!orig_node)
+		goto out;
 
-	return curr_gateway_tmp->orig_node;
+	if (!atomic_inc_not_zero(&orig_node->refcount))
+		orig_node = NULL;
+
+out:
+	rcu_read_unlock();
+	return orig_node;
 }
 
 void gw_deselect(struct bat_priv *bat_priv)
 {
-	struct gw_node *gw_node = bat_priv->curr_gw;
+	struct gw_node *gw_node;
 
-	bat_priv->curr_gw = NULL;
+	spin_lock_bh(&bat_priv->gw_list_lock);
+	gw_node = rcu_dereference(bat_priv->curr_gw);
+	rcu_assign_pointer(bat_priv->curr_gw, NULL);
+	spin_unlock_bh(&bat_priv->gw_list_lock);
 
 	if (gw_node)
-		kref_put(&gw_node->refcount, gw_node_free_ref);
+		gw_node_free_ref(gw_node);
 }
 
-static struct gw_node *gw_select(struct bat_priv *bat_priv,
-				 struct gw_node *new_gw_node)
+static void gw_select(struct bat_priv *bat_priv, struct gw_node *new_gw_node)
 {
-	struct gw_node *curr_gw_node = bat_priv->curr_gw;
+	struct gw_node *curr_gw_node;
 
-	if (new_gw_node)
-		kref_get(&new_gw_node->refcount);
+	if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount))
+		new_gw_node = NULL;
+
+	spin_lock_bh(&bat_priv->gw_list_lock);
+	curr_gw_node = rcu_dereference(bat_priv->curr_gw);
+	rcu_assign_pointer(bat_priv->curr_gw, new_gw_node);
+	spin_unlock_bh(&bat_priv->gw_list_lock);
 
-	bat_priv->curr_gw = new_gw_node;
-	return curr_gw_node;
+	if (curr_gw_node)
+		gw_node_free_ref(curr_gw_node);
 }
 
 void gw_election(struct bat_priv *bat_priv)
 {
 	struct hlist_node *node;
-	struct gw_node *gw_node, *curr_gw_tmp = NULL, *old_gw_node = NULL;
+	struct gw_node *gw_node, *curr_gw, *curr_gw_tmp = NULL;
 	uint8_t max_tq = 0;
 	uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
 	int down, up;
@@ -93,19 +110,23 @@ void gw_election(struct bat_priv *bat_priv)
 	if (atomic_read(&bat_priv->gw_mode) != GW_MODE_CLIENT)
 		return;
 
-	if (bat_priv->curr_gw)
+	rcu_read_lock();
+	curr_gw = rcu_dereference(bat_priv->curr_gw);
+	if (curr_gw) {
+		rcu_read_unlock();
 		return;
+	}
 
-	rcu_read_lock();
 	if (hlist_empty(&bat_priv->gw_list)) {
-		rcu_read_unlock();
 
-		if (bat_priv->curr_gw) {
+		if (curr_gw) {
+			rcu_read_unlock();
 			bat_dbg(DBG_BATMAN, bat_priv,
 				"Removing selected gateway - "
 				"no gateway in range\n");
 			gw_deselect(bat_priv);
-		}
+		} else
+			rcu_read_unlock();
 
 		return;
 	}
@@ -154,12 +175,12 @@ void gw_election(struct bat_priv *bat_priv)
 			max_gw_factor = tmp_gw_factor;
 	}
 
-	if (bat_priv->curr_gw != curr_gw_tmp) {
-		if ((bat_priv->curr_gw) && (!curr_gw_tmp))
+	if (curr_gw != curr_gw_tmp) {
+		if ((curr_gw) && (!curr_gw_tmp))
 			bat_dbg(DBG_BATMAN, bat_priv,
 				"Removing selected gateway - "
 				"no gateway in range\n");
-		else if ((!bat_priv->curr_gw) && (curr_gw_tmp))
+		else if ((!curr_gw) && (curr_gw_tmp))
 			bat_dbg(DBG_BATMAN, bat_priv,
 				"Adding route to gateway %pM "
 				"(gw_flags: %i, tq: %i)\n",
@@ -174,43 +195,43 @@ void gw_election(struct bat_priv *bat_priv)
 				curr_gw_tmp->orig_node->gw_flags,
 				curr_gw_tmp->orig_node->router->tq_avg);
 
-		old_gw_node = gw_select(bat_priv, curr_gw_tmp);
+		gw_select(bat_priv, curr_gw_tmp);
 	}
 
 	rcu_read_unlock();
-
-	/* the kfree() has to be outside of the rcu lock */
-	if (old_gw_node)
-		kref_put(&old_gw_node->refcount, gw_node_free_ref);
 }
 
 void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
 {
-	struct gw_node *curr_gateway_tmp = bat_priv->curr_gw;
+	struct gw_node *curr_gateway_tmp;
 	uint8_t gw_tq_avg, orig_tq_avg;
 
+	rcu_read_lock();
+	curr_gateway_tmp = rcu_dereference(bat_priv->curr_gw);
 	if (!curr_gateway_tmp)
-		return;
+		goto out_rcu;
 
 	if (!curr_gateway_tmp->orig_node)
-		goto deselect;
+		goto deselect_rcu;
 
 	if (!curr_gateway_tmp->orig_node->router)
-		goto deselect;
+		goto deselect_rcu;
 
 	/* this node already is the gateway */
 	if (curr_gateway_tmp->orig_node == orig_node)
-		return;
+		goto out_rcu;
 
 	if (!orig_node->router)
-		return;
+		goto out_rcu;
 
 	gw_tq_avg = curr_gateway_tmp->orig_node->router->tq_avg;
+	rcu_read_unlock();
+
 	orig_tq_avg = orig_node->router->tq_avg;
 
 	/* the TQ value has to be better */
 	if (orig_tq_avg < gw_tq_avg)
-		return;
+		goto out;
 
 	/**
 	 * if the routing class is greater than 3 the value tells us how much
@@ -218,15 +239,23 @@ void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
 	 **/
 	if ((atomic_read(&bat_priv->gw_sel_class) > 3) &&
 	    (orig_tq_avg - gw_tq_avg < atomic_read(&bat_priv->gw_sel_class)))
-		return;
+		goto out;
 
 	bat_dbg(DBG_BATMAN, bat_priv,
 		"Restarting gateway selection: better gateway found (tq curr: "
 		"%i, tq new: %i)\n",
 		gw_tq_avg, orig_tq_avg);
+	goto deselect;
 
+out_rcu:
+	rcu_read_unlock();
+	goto out;
+deselect_rcu:
+	rcu_read_unlock();
 deselect:
 	gw_deselect(bat_priv);
+out:
+	return;
 }
 
 static void gw_node_add(struct bat_priv *bat_priv,
@@ -242,7 +271,7 @@ static void gw_node_add(struct bat_priv *bat_priv,
 	memset(gw_node, 0, sizeof(struct gw_node));
 	INIT_HLIST_NODE(&gw_node->list);
 	gw_node->orig_node = orig_node;
-	kref_init(&gw_node->refcount);
+	atomic_set(&gw_node->refcount, 1);
 
 	spin_lock_bh(&bat_priv->gw_list_lock);
 	hlist_add_head_rcu(&gw_node->list, &bat_priv->gw_list);
@@ -283,7 +312,7 @@ void gw_node_update(struct bat_priv *bat_priv,
 				"Gateway %pM removed from gateway list\n",
 				orig_node->orig);
 
-			if (gw_node == bat_priv->curr_gw) {
+			if (gw_node == rcu_dereference(bat_priv->curr_gw)) {
 				rcu_read_unlock();
 				gw_deselect(bat_priv);
 				return;
@@ -321,11 +350,11 @@ void gw_node_purge(struct bat_priv *bat_priv)
 		    atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE)
 			continue;
 
-		if (bat_priv->curr_gw == gw_node)
+		if (rcu_dereference(bat_priv->curr_gw) == gw_node)
 			gw_deselect(bat_priv);
 
 		hlist_del_rcu(&gw_node->list);
-		call_rcu(&gw_node->rcu, gw_node_free_rcu);
+		gw_node_free_ref(gw_node);
 	}
 
 
@@ -335,12 +364,16 @@ void gw_node_purge(struct bat_priv *bat_priv)
 static int _write_buffer_text(struct bat_priv *bat_priv,
 			      struct seq_file *seq, struct gw_node *gw_node)
 {
-	int down, up;
+	struct gw_node *curr_gw;
+	int down, up, ret;
 
 	gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, &down, &up);
 
-	return seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %3i - %i%s/%i%s\n",
-		       (bat_priv->curr_gw == gw_node ? "=>" : "  "),
+	rcu_read_lock();
+	curr_gw = rcu_dereference(bat_priv->curr_gw);
+
+	ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %3i - %i%s/%i%s\n",
+		       (curr_gw == gw_node ? "=>" : "  "),
 		       gw_node->orig_node->orig,
 		       gw_node->orig_node->router->tq_avg,
 		       gw_node->orig_node->router->addr,
@@ -350,6 +383,9 @@ static int _write_buffer_text(struct bat_priv *bat_priv,
 		       (down > 2048 ? "MBit" : "KBit"),
 		       (up > 2048 ? up / 1024 : up),
 		       (up > 2048 ? "MBit" : "KBit"));
+
+	rcu_read_unlock();
+	return ret;
 }
 
 int gw_client_seq_print_text(struct seq_file *seq, void *offset)
@@ -470,8 +506,12 @@ int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb)
 	if (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER)
 		return -1;
 
-	if (!bat_priv->curr_gw)
+	rcu_read_lock();
+	if (!rcu_dereference(bat_priv->curr_gw)) {
+		rcu_read_unlock();
 		return 0;
+	}
+	rcu_read_unlock();
 
 	return 1;
 }
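The curr_gw handling above follows the usual RCU pointer-swap discipline: writers exchange the pointer under gw_list_lock and drop the old reference afterwards, while readers pin the node with atomic_inc_not_zero() inside the read-side critical section. A reader-side sketch (names follow the diff; the helper itself is illustrative):

	static struct gw_node *example_get_curr_gw(struct bat_priv *bat_priv)
	{
		struct gw_node *gw_node;

		rcu_read_lock();
		gw_node = rcu_dereference(bat_priv->curr_gw);
		if (gw_node && !atomic_inc_not_zero(&gw_node->refcount))
			gw_node = NULL;	/* lost the race against the final put */
		rcu_read_unlock();

		/* the caller drops the reference with gw_node_free_ref() */
		return gw_node;
	}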
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index f2131f45aa9b..b3058e46ee6b 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -31,8 +31,8 @@
 
 #include <linux/if_arp.h>
 
-/* protect update critical side of if_list - but not the content */
-static DEFINE_SPINLOCK(if_list_lock);
+/* protect update critical side of hardif_list - but not the content */
+static DEFINE_SPINLOCK(hardif_list_lock);
 
 
 static int batman_skb_recv(struct sk_buff *skb,
@@ -40,33 +40,31 @@ static int batman_skb_recv(struct sk_buff *skb,
 			   struct packet_type *ptype,
 			   struct net_device *orig_dev);
 
-static void hardif_free_rcu(struct rcu_head *rcu)
+void hardif_free_rcu(struct rcu_head *rcu)
 {
-	struct batman_if *batman_if;
+	struct hard_iface *hard_iface;
 
-	batman_if = container_of(rcu, struct batman_if, rcu);
-	dev_put(batman_if->net_dev);
-	kref_put(&batman_if->refcount, hardif_free_ref);
+	hard_iface = container_of(rcu, struct hard_iface, rcu);
+	dev_put(hard_iface->net_dev);
+	kfree(hard_iface);
 }
 
-struct batman_if *get_batman_if_by_netdev(struct net_device *net_dev)
+struct hard_iface *hardif_get_by_netdev(struct net_device *net_dev)
 {
-	struct batman_if *batman_if;
+	struct hard_iface *hard_iface;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(batman_if, &if_list, list) {
-		if (batman_if->net_dev == net_dev)
+	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+		if (hard_iface->net_dev == net_dev &&
+		    atomic_inc_not_zero(&hard_iface->refcount))
 			goto out;
 	}
 
-	batman_if = NULL;
+	hard_iface = NULL;
 
 out:
-	if (batman_if)
-		kref_get(&batman_if->refcount);
-
 	rcu_read_unlock();
-	return batman_if;
+	return hard_iface;
 }
 
 static int is_valid_iface(struct net_device *net_dev)
@@ -81,13 +79,8 @@ static int is_valid_iface(struct net_device *net_dev)
 		return 0;
 
 	/* no batman over batman */
-#ifdef HAVE_NET_DEVICE_OPS
-	if (net_dev->netdev_ops->ndo_start_xmit == interface_tx)
-		return 0;
-#else
-	if (net_dev->hard_start_xmit == interface_tx)
+	if (softif_is_valid(net_dev))
 		return 0;
-#endif
 
 	/* Device is being bridged */
 	/* if (net_dev->priv_flags & IFF_BRIDGE_PORT)
@@ -96,27 +89,25 @@ static int is_valid_iface(struct net_device *net_dev)
 	return 1;
 }
 
-static struct batman_if *get_active_batman_if(struct net_device *soft_iface)
+static struct hard_iface *hardif_get_active(struct net_device *soft_iface)
 {
-	struct batman_if *batman_if;
+	struct hard_iface *hard_iface;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(batman_if, &if_list, list) {
-		if (batman_if->soft_iface != soft_iface)
+	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+		if (hard_iface->soft_iface != soft_iface)
 			continue;
 
-		if (batman_if->if_status == IF_ACTIVE)
+		if (hard_iface->if_status == IF_ACTIVE &&
+		    atomic_inc_not_zero(&hard_iface->refcount))
 			goto out;
 	}
 
-	batman_if = NULL;
+	hard_iface = NULL;
 
 out:
-	if (batman_if)
-		kref_get(&batman_if->refcount);
-
 	rcu_read_unlock();
-	return batman_if;
+	return hard_iface;
 }
 
 static void update_primary_addr(struct bat_priv *bat_priv)
@@ -132,24 +123,24 @@ static void update_primary_addr(struct bat_priv *bat_priv)
 }
 
 static void set_primary_if(struct bat_priv *bat_priv,
-			   struct batman_if *batman_if)
+			   struct hard_iface *hard_iface)
 {
 	struct batman_packet *batman_packet;
-	struct batman_if *old_if;
+	struct hard_iface *old_if;
 
-	if (batman_if)
-		kref_get(&batman_if->refcount);
+	if (hard_iface && !atomic_inc_not_zero(&hard_iface->refcount))
+		hard_iface = NULL;
 
 	old_if = bat_priv->primary_if;
-	bat_priv->primary_if = batman_if;
+	bat_priv->primary_if = hard_iface;
 
 	if (old_if)
-		kref_put(&old_if->refcount, hardif_free_ref);
+		hardif_free_ref(old_if);
 
 	if (!bat_priv->primary_if)
 		return;
 
-	batman_packet = (struct batman_packet *)(batman_if->packet_buff);
+	batman_packet = (struct batman_packet *)(hard_iface->packet_buff);
 	batman_packet->flags = PRIMARIES_FIRST_HOP;
 	batman_packet->ttl = TTL;
 
@@ -162,42 +153,42 @@ static void set_primary_if(struct bat_priv *bat_priv,
 	atomic_set(&bat_priv->hna_local_changed, 1);
 }
 
-static bool hardif_is_iface_up(struct batman_if *batman_if)
+static bool hardif_is_iface_up(struct hard_iface *hard_iface)
 {
-	if (batman_if->net_dev->flags & IFF_UP)
+	if (hard_iface->net_dev->flags & IFF_UP)
 		return true;
 
 	return false;
 }
 
-static void update_mac_addresses(struct batman_if *batman_if)
+static void update_mac_addresses(struct hard_iface *hard_iface)
 {
-	memcpy(((struct batman_packet *)(batman_if->packet_buff))->orig,
-	       batman_if->net_dev->dev_addr, ETH_ALEN);
-	memcpy(((struct batman_packet *)(batman_if->packet_buff))->prev_sender,
-	       batman_if->net_dev->dev_addr, ETH_ALEN);
+	memcpy(((struct batman_packet *)(hard_iface->packet_buff))->orig,
+	       hard_iface->net_dev->dev_addr, ETH_ALEN);
+	memcpy(((struct batman_packet *)(hard_iface->packet_buff))->prev_sender,
+	       hard_iface->net_dev->dev_addr, ETH_ALEN);
 }
 
 static void check_known_mac_addr(struct net_device *net_dev)
 {
-	struct batman_if *batman_if;
+	struct hard_iface *hard_iface;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(batman_if, &if_list, list) {
-		if ((batman_if->if_status != IF_ACTIVE) &&
-		    (batman_if->if_status != IF_TO_BE_ACTIVATED))
+	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+		if ((hard_iface->if_status != IF_ACTIVE) &&
+		    (hard_iface->if_status != IF_TO_BE_ACTIVATED))
 			continue;
 
-		if (batman_if->net_dev == net_dev)
+		if (hard_iface->net_dev == net_dev)
 			continue;
 
-		if (!compare_orig(batman_if->net_dev->dev_addr,
-				  net_dev->dev_addr))
+		if (!compare_eth(hard_iface->net_dev->dev_addr,
+				 net_dev->dev_addr))
 			continue;
 
 		pr_warning("The newly added mac address (%pM) already exists "
 			   "on: %s\n", net_dev->dev_addr,
-			   batman_if->net_dev->name);
+			   hard_iface->net_dev->name);
 		pr_warning("It is strongly recommended to keep mac addresses "
 			   "unique to avoid problems!\n");
 	}
@@ -207,7 +198,7 @@ static void check_known_mac_addr(struct net_device *net_dev)
 int hardif_min_mtu(struct net_device *soft_iface)
 {
 	struct bat_priv *bat_priv = netdev_priv(soft_iface);
-	struct batman_if *batman_if;
+	struct hard_iface *hard_iface;
 	/* allow big frames if all devices are capable to do so
 	 * (have MTU > 1500 + BAT_HEADER_LEN) */
 	int min_mtu = ETH_DATA_LEN;
@@ -216,15 +207,15 @@ int hardif_min_mtu(struct net_device *soft_iface)
 		goto out;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(batman_if, &if_list, list) {
-		if ((batman_if->if_status != IF_ACTIVE) &&
-		    (batman_if->if_status != IF_TO_BE_ACTIVATED))
+	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+		if ((hard_iface->if_status != IF_ACTIVE) &&
+		    (hard_iface->if_status != IF_TO_BE_ACTIVATED))
 			continue;
 
-		if (batman_if->soft_iface != soft_iface)
+		if (hard_iface->soft_iface != soft_iface)
 			continue;
 
-		min_mtu = min_t(int, batman_if->net_dev->mtu - BAT_HEADER_LEN,
+		min_mtu = min_t(int, hard_iface->net_dev->mtu - BAT_HEADER_LEN,
 				min_mtu);
 	}
 	rcu_read_unlock();
@@ -242,77 +233,95 @@ void update_min_mtu(struct net_device *soft_iface)
 	soft_iface->mtu = min_mtu;
 }
 
-static void hardif_activate_interface(struct batman_if *batman_if)
+static void hardif_activate_interface(struct hard_iface *hard_iface)
 {
 	struct bat_priv *bat_priv;
 
-	if (batman_if->if_status != IF_INACTIVE)
+	if (hard_iface->if_status != IF_INACTIVE)
 		return;
 
-	bat_priv = netdev_priv(batman_if->soft_iface);
+	bat_priv = netdev_priv(hard_iface->soft_iface);
 
-	update_mac_addresses(batman_if);
-	batman_if->if_status = IF_TO_BE_ACTIVATED;
+	update_mac_addresses(hard_iface);
+	hard_iface->if_status = IF_TO_BE_ACTIVATED;
 
 	/**
 	 * the first active interface becomes our primary interface or
 	 * the next active interface after the old primay interface was removed
 	 */
 	if (!bat_priv->primary_if)
-		set_primary_if(bat_priv, batman_if);
+		set_primary_if(bat_priv, hard_iface);
 
-	bat_info(batman_if->soft_iface, "Interface activated: %s\n",
-		 batman_if->net_dev->name);
+	bat_info(hard_iface->soft_iface, "Interface activated: %s\n",
+		 hard_iface->net_dev->name);
 
-	update_min_mtu(batman_if->soft_iface);
+	update_min_mtu(hard_iface->soft_iface);
 	return;
 }
 
-static void hardif_deactivate_interface(struct batman_if *batman_if)
+static void hardif_deactivate_interface(struct hard_iface *hard_iface)
 {
-	if ((batman_if->if_status != IF_ACTIVE) &&
-	    (batman_if->if_status != IF_TO_BE_ACTIVATED))
+	if ((hard_iface->if_status != IF_ACTIVE) &&
+	    (hard_iface->if_status != IF_TO_BE_ACTIVATED))
 		return;
 
-	batman_if->if_status = IF_INACTIVE;
+	hard_iface->if_status = IF_INACTIVE;
 
-	bat_info(batman_if->soft_iface, "Interface deactivated: %s\n",
-		 batman_if->net_dev->name);
+	bat_info(hard_iface->soft_iface, "Interface deactivated: %s\n",
+		 hard_iface->net_dev->name);
 
-	update_min_mtu(batman_if->soft_iface);
+	update_min_mtu(hard_iface->soft_iface);
 }
 
-int hardif_enable_interface(struct batman_if *batman_if, char *iface_name)
+int hardif_enable_interface(struct hard_iface *hard_iface, char *iface_name)
 {
 	struct bat_priv *bat_priv;
 	struct batman_packet *batman_packet;
+	struct net_device *soft_iface;
+	int ret;
+
+	if (hard_iface->if_status != IF_NOT_IN_USE)
+		goto out;
 
-	if (batman_if->if_status != IF_NOT_IN_USE)
+	if (!atomic_inc_not_zero(&hard_iface->refcount))
 		goto out;
 
-	batman_if->soft_iface = dev_get_by_name(&init_net, iface_name);
+	soft_iface = dev_get_by_name(&init_net, iface_name);
 
-	if (!batman_if->soft_iface) {
-		batman_if->soft_iface = softif_create(iface_name);
+	if (!soft_iface) {
+		soft_iface = softif_create(iface_name);
 
-		if (!batman_if->soft_iface)
+		if (!soft_iface) {
+			ret = -ENOMEM;
 			goto err;
+		}
 
 		/* dev_get_by_name() increases the reference counter for us */
-		dev_hold(batman_if->soft_iface);
+		dev_hold(soft_iface);
 	}
 
-	bat_priv = netdev_priv(batman_if->soft_iface);
-	batman_if->packet_len = BAT_PACKET_LEN;
-	batman_if->packet_buff = kmalloc(batman_if->packet_len, GFP_ATOMIC);
+	if (!softif_is_valid(soft_iface)) {
+		pr_err("Can't create batman mesh interface %s: "
+		       "already exists as regular interface\n",
+		       soft_iface->name);
+		dev_put(soft_iface);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	hard_iface->soft_iface = soft_iface;
+	bat_priv = netdev_priv(hard_iface->soft_iface);
+	hard_iface->packet_len = BAT_PACKET_LEN;
+	hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC);
 
-	if (!batman_if->packet_buff) {
-		bat_err(batman_if->soft_iface, "Can't add interface packet "
-			"(%s): out of memory\n", batman_if->net_dev->name);
+	if (!hard_iface->packet_buff) {
+		bat_err(hard_iface->soft_iface, "Can't add interface packet "
+			"(%s): out of memory\n", hard_iface->net_dev->name);
+		ret = -ENOMEM;
 		goto err;
 	}
 
-	batman_packet = (struct batman_packet *)(batman_if->packet_buff);
+	batman_packet = (struct batman_packet *)(hard_iface->packet_buff);
 	batman_packet->packet_type = BAT_PACKET;
 	batman_packet->version = COMPAT_VERSION;
 	batman_packet->flags = 0;
@@ -320,107 +329,107 @@ int hardif_enable_interface(struct batman_if *batman_if, char *iface_name)
 	batman_packet->tq = TQ_MAX_VALUE;
 	batman_packet->num_hna = 0;
 
-	batman_if->if_num = bat_priv->num_ifaces;
+	hard_iface->if_num = bat_priv->num_ifaces;
 	bat_priv->num_ifaces++;
-	batman_if->if_status = IF_INACTIVE;
-	orig_hash_add_if(batman_if, bat_priv->num_ifaces);
+	hard_iface->if_status = IF_INACTIVE;
+	orig_hash_add_if(hard_iface, bat_priv->num_ifaces);
 
-	batman_if->batman_adv_ptype.type = __constant_htons(ETH_P_BATMAN);
-	batman_if->batman_adv_ptype.func = batman_skb_recv;
-	batman_if->batman_adv_ptype.dev = batman_if->net_dev;
-	kref_get(&batman_if->refcount);
-	dev_add_pack(&batman_if->batman_adv_ptype);
+	hard_iface->batman_adv_ptype.type = __constant_htons(ETH_P_BATMAN);
+	hard_iface->batman_adv_ptype.func = batman_skb_recv;
+	hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
+	dev_add_pack(&hard_iface->batman_adv_ptype);
 
-	atomic_set(&batman_if->seqno, 1);
-	atomic_set(&batman_if->frag_seqno, 1);
-	bat_info(batman_if->soft_iface, "Adding interface: %s\n",
-		 batman_if->net_dev->name);
+	atomic_set(&hard_iface->seqno, 1);
+	atomic_set(&hard_iface->frag_seqno, 1);
+	bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
+		 hard_iface->net_dev->name);
 
-	if (atomic_read(&bat_priv->fragmentation) && batman_if->net_dev->mtu <
+	if (atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu <
 		ETH_DATA_LEN + BAT_HEADER_LEN)
-		bat_info(batman_if->soft_iface,
+		bat_info(hard_iface->soft_iface,
 			"The MTU of interface %s is too small (%i) to handle "
 			"the transport of batman-adv packets. Packets going "
 			"over this interface will be fragmented on layer2 "
 			"which could impact the performance. Setting the MTU "
 			"to %zi would solve the problem.\n",
-			batman_if->net_dev->name, batman_if->net_dev->mtu,
+			hard_iface->net_dev->name, hard_iface->net_dev->mtu,
 			ETH_DATA_LEN + BAT_HEADER_LEN);
 
-	if (!atomic_read(&bat_priv->fragmentation) && batman_if->net_dev->mtu <
+	if (!atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu <
 		ETH_DATA_LEN + BAT_HEADER_LEN)
-		bat_info(batman_if->soft_iface,
+		bat_info(hard_iface->soft_iface,
 			"The MTU of interface %s is too small (%i) to handle "
 			"the transport of batman-adv packets. If you experience"
 			" problems getting traffic through try increasing the "
 			"MTU to %zi.\n",
-			batman_if->net_dev->name, batman_if->net_dev->mtu,
+			hard_iface->net_dev->name, hard_iface->net_dev->mtu,
 			ETH_DATA_LEN + BAT_HEADER_LEN);
 
-	if (hardif_is_iface_up(batman_if))
-		hardif_activate_interface(batman_if);
+	if (hardif_is_iface_up(hard_iface))
+		hardif_activate_interface(hard_iface);
 	else
-		bat_err(batman_if->soft_iface, "Not using interface %s "
+		bat_err(hard_iface->soft_iface, "Not using interface %s "
 			"(retrying later): interface not active\n",
-			batman_if->net_dev->name);
+			hard_iface->net_dev->name);
 
 	/* begin scheduling originator messages on that interface */
-	schedule_own_packet(batman_if);
+	schedule_own_packet(hard_iface);
 
 out:
 	return 0;
 
 err:
-	return -ENOMEM;
+	hardif_free_ref(hard_iface);
+	return ret;
 }
 
-void hardif_disable_interface(struct batman_if *batman_if)
+void hardif_disable_interface(struct hard_iface *hard_iface)
 {
-	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
+	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 
-	if (batman_if->if_status == IF_ACTIVE)
-		hardif_deactivate_interface(batman_if);
+	if (hard_iface->if_status == IF_ACTIVE)
+		hardif_deactivate_interface(hard_iface);
 
-	if (batman_if->if_status != IF_INACTIVE)
+	if (hard_iface->if_status != IF_INACTIVE)
 		return;
 
-	bat_info(batman_if->soft_iface, "Removing interface: %s\n",
-		 batman_if->net_dev->name);
-	dev_remove_pack(&batman_if->batman_adv_ptype);
-	kref_put(&batman_if->refcount, hardif_free_ref);
+	bat_info(hard_iface->soft_iface, "Removing interface: %s\n",
+		 hard_iface->net_dev->name);
+	dev_remove_pack(&hard_iface->batman_adv_ptype);
 
 	bat_priv->num_ifaces--;
-	orig_hash_del_if(batman_if, bat_priv->num_ifaces);
+	orig_hash_del_if(hard_iface, bat_priv->num_ifaces);
 
-	if (batman_if == bat_priv->primary_if) {
-		struct batman_if *new_if;
+	if (hard_iface == bat_priv->primary_if) {
+		struct hard_iface *new_if;
 
-		new_if = get_active_batman_if(batman_if->soft_iface);
+		new_if = hardif_get_active(hard_iface->soft_iface);
 		set_primary_if(bat_priv, new_if);
 
 		if (new_if)
-			kref_put(&new_if->refcount, hardif_free_ref);
+			hardif_free_ref(new_if);
 	}
 
-	kfree(batman_if->packet_buff);
-	batman_if->packet_buff = NULL;
-	batman_if->if_status = IF_NOT_IN_USE;
+	kfree(hard_iface->packet_buff);
+	hard_iface->packet_buff = NULL;
+	hard_iface->if_status = IF_NOT_IN_USE;
 
-	/* delete all references to this batman_if */
+	/* delete all references to this hard_iface */
 	purge_orig_ref(bat_priv);
-	purge_outstanding_packets(bat_priv, batman_if);
-	dev_put(batman_if->soft_iface);
+	purge_outstanding_packets(bat_priv, hard_iface);
+	dev_put(hard_iface->soft_iface);
 
 	/* nobody uses this interface anymore */
 	if (!bat_priv->num_ifaces)
-		softif_destroy(batman_if->soft_iface);
+		softif_destroy(hard_iface->soft_iface);
 
-	batman_if->soft_iface = NULL;
+	hard_iface->soft_iface = NULL;
+	hardif_free_ref(hard_iface);
 }
 
-static struct batman_if *hardif_add_interface(struct net_device *net_dev)
+static struct hard_iface *hardif_add_interface(struct net_device *net_dev)
 {
-	struct batman_if *batman_if;
+	struct hard_iface *hard_iface;
 	int ret;
 
 	ret = is_valid_iface(net_dev);
@@ -429,73 +438,73 @@ static struct hard_iface *hardif_add_interface(struct net_device *net_dev)
 
 	dev_hold(net_dev);
 
-	batman_if = kmalloc(sizeof(struct batman_if), GFP_ATOMIC);
-	if (!batman_if) {
+	hard_iface = kmalloc(sizeof(struct hard_iface), GFP_ATOMIC);
+	if (!hard_iface) {
 		pr_err("Can't add interface (%s): out of memory\n",
 		       net_dev->name);
 		goto release_dev;
 	}
 
-	ret = sysfs_add_hardif(&batman_if->hardif_obj, net_dev);
+	ret = sysfs_add_hardif(&hard_iface->hardif_obj, net_dev);
 	if (ret)
 		goto free_if;
 
-	batman_if->if_num = -1;
-	batman_if->net_dev = net_dev;
-	batman_if->soft_iface = NULL;
-	batman_if->if_status = IF_NOT_IN_USE;
-	INIT_LIST_HEAD(&batman_if->list);
-	kref_init(&batman_if->refcount);
+	hard_iface->if_num = -1;
+	hard_iface->net_dev = net_dev;
+	hard_iface->soft_iface = NULL;
+	hard_iface->if_status = IF_NOT_IN_USE;
+	INIT_LIST_HEAD(&hard_iface->list);
+	/* extra reference for return */
+	atomic_set(&hard_iface->refcount, 2);
 
-	check_known_mac_addr(batman_if->net_dev);
+	check_known_mac_addr(hard_iface->net_dev);
 
-	spin_lock(&if_list_lock);
-	list_add_tail_rcu(&batman_if->list, &if_list);
-	spin_unlock(&if_list_lock);
+	spin_lock(&hardif_list_lock);
+	list_add_tail_rcu(&hard_iface->list, &hardif_list);
+	spin_unlock(&hardif_list_lock);
 
-	/* extra reference for return */
-	kref_get(&batman_if->refcount);
-	return batman_if;
+	return hard_iface;
 
 free_if:
-	kfree(batman_if);
+	kfree(hard_iface);
 release_dev:
 	dev_put(net_dev);
 out:
 	return NULL;
 }
 
-static void hardif_remove_interface(struct batman_if *batman_if)
+static void hardif_remove_interface(struct hard_iface *hard_iface)
 {
 	/* first deactivate interface */
-	if (batman_if->if_status != IF_NOT_IN_USE)
-		hardif_disable_interface(batman_if);
+	if (hard_iface->if_status != IF_NOT_IN_USE)
+		hardif_disable_interface(hard_iface);
 
-	if (batman_if->if_status != IF_NOT_IN_USE)
+	if (hard_iface->if_status != IF_NOT_IN_USE)
 		return;
 
-	batman_if->if_status = IF_TO_BE_REMOVED;
-	sysfs_del_hardif(&batman_if->hardif_obj);
-	call_rcu(&batman_if->rcu, hardif_free_rcu);
+	hard_iface->if_status = IF_TO_BE_REMOVED;
+	sysfs_del_hardif(&hard_iface->hardif_obj);
+	hardif_free_ref(hard_iface);
 }
 
 void hardif_remove_interfaces(void)
 {
-	struct batman_if *batman_if, *batman_if_tmp;
+	struct hard_iface *hard_iface, *hard_iface_tmp;
 	struct list_head if_queue;
 
 	INIT_LIST_HEAD(&if_queue);
 
-	spin_lock(&if_list_lock);
-	list_for_each_entry_safe(batman_if, batman_if_tmp, &if_list, list) {
-		list_del_rcu(&batman_if->list);
-		list_add_tail(&batman_if->list, &if_queue);
+	spin_lock(&hardif_list_lock);
+	list_for_each_entry_safe(hard_iface, hard_iface_tmp,
+				 &hardif_list, list) {
+		list_del_rcu(&hard_iface->list);
+		list_add_tail(&hard_iface->list, &if_queue);
 	}
-	spin_unlock(&if_list_lock);
+	spin_unlock(&hardif_list_lock);
 
 	rtnl_lock();
-	list_for_each_entry_safe(batman_if, batman_if_tmp, &if_queue, list) {
-		hardif_remove_interface(batman_if);
+	list_for_each_entry_safe(hard_iface, hard_iface_tmp, &if_queue, list) {
+		hardif_remove_interface(hard_iface);
 	}
 	rtnl_unlock();
 }
@@ -504,43 +513,43 @@ static int hard_if_event(struct notifier_block *this,
 			 unsigned long event, void *ptr)
 {
 	struct net_device *net_dev = (struct net_device *)ptr;
-	struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
+	struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
 	struct bat_priv *bat_priv;
 
-	if (!batman_if && event == NETDEV_REGISTER)
-		batman_if = hardif_add_interface(net_dev);
+	if (!hard_iface && event == NETDEV_REGISTER)
+		hard_iface = hardif_add_interface(net_dev);
 
-	if (!batman_if)
+	if (!hard_iface)
 		goto out;
 
 	switch (event) {
 	case NETDEV_UP:
-		hardif_activate_interface(batman_if);
+		hardif_activate_interface(hard_iface);
 		break;
 	case NETDEV_GOING_DOWN:
 	case NETDEV_DOWN:
-		hardif_deactivate_interface(batman_if);
+		hardif_deactivate_interface(hard_iface);
 		break;
 	case NETDEV_UNREGISTER:
-		spin_lock(&if_list_lock);
-		list_del_rcu(&batman_if->list);
-		spin_unlock(&if_list_lock);
+		spin_lock(&hardif_list_lock);
+		list_del_rcu(&hard_iface->list);
+		spin_unlock(&hardif_list_lock);
 
-		hardif_remove_interface(batman_if);
+		hardif_remove_interface(hard_iface);
 		break;
 	case NETDEV_CHANGEMTU:
-		if (batman_if->soft_iface)
-			update_min_mtu(batman_if->soft_iface);
+		if (hard_iface->soft_iface)
+			update_min_mtu(hard_iface->soft_iface);
 		break;
 	case NETDEV_CHANGEADDR:
-		if (batman_if->if_status == IF_NOT_IN_USE)
+		if (hard_iface->if_status == IF_NOT_IN_USE)
 			goto hardif_put;
 
-		check_known_mac_addr(batman_if->net_dev);
-		update_mac_addresses(batman_if);
+		check_known_mac_addr(hard_iface->net_dev);
+		update_mac_addresses(hard_iface);
 
-		bat_priv = netdev_priv(batman_if->soft_iface);
-		if (batman_if == bat_priv->primary_if)
+		bat_priv = netdev_priv(hard_iface->soft_iface);
+		if (hard_iface == bat_priv->primary_if)
 			update_primary_addr(bat_priv);
 		break;
 	default:
@@ -548,7 +557,7 @@ static int hard_if_event(struct notifier_block *this, | |||
548 | }; | 557 | }; |
549 | 558 | ||
550 | hardif_put: | 559 | hardif_put: |
551 | kref_put(&batman_if->refcount, hardif_free_ref); | 560 | hardif_free_ref(hard_iface); |
552 | out: | 561 | out: |
553 | return NOTIFY_DONE; | 562 | return NOTIFY_DONE; |
554 | } | 563 | } |
@@ -561,10 +570,10 @@ static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, | |||
561 | { | 570 | { |
562 | struct bat_priv *bat_priv; | 571 | struct bat_priv *bat_priv; |
563 | struct batman_packet *batman_packet; | 572 | struct batman_packet *batman_packet; |
564 | struct batman_if *batman_if; | 573 | struct hard_iface *hard_iface; |
565 | int ret; | 574 | int ret; |
566 | 575 | ||
567 | batman_if = container_of(ptype, struct batman_if, batman_adv_ptype); | 576 | hard_iface = container_of(ptype, struct hard_iface, batman_adv_ptype); |
568 | skb = skb_share_check(skb, GFP_ATOMIC); | 577 | skb = skb_share_check(skb, GFP_ATOMIC); |
569 | 578 | ||
570 | /* skb was released by skb_share_check() */ | 579 | /* skb was released by skb_share_check() */ |
@@ -580,16 +589,16 @@ static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, | |||
580 | || !skb_mac_header(skb))) | 589 | || !skb_mac_header(skb))) |
581 | goto err_free; | 590 | goto err_free; |
582 | 591 | ||
583 | if (!batman_if->soft_iface) | 592 | if (!hard_iface->soft_iface) |
584 | goto err_free; | 593 | goto err_free; |
585 | 594 | ||
586 | bat_priv = netdev_priv(batman_if->soft_iface); | 595 | bat_priv = netdev_priv(hard_iface->soft_iface); |
587 | 596 | ||
588 | if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE) | 597 | if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE) |
589 | goto err_free; | 598 | goto err_free; |
590 | 599 | ||
591 | /* discard frames on not active interfaces */ | 600 | /* discard frames on not active interfaces */ |
592 | if (batman_if->if_status != IF_ACTIVE) | 601 | if (hard_iface->if_status != IF_ACTIVE) |
593 | goto err_free; | 602 | goto err_free; |
594 | 603 | ||
595 | batman_packet = (struct batman_packet *)skb->data; | 604 | batman_packet = (struct batman_packet *)skb->data; |
@@ -607,32 +616,32 @@ static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, | |||
607 | switch (batman_packet->packet_type) { | 616 | switch (batman_packet->packet_type) { |
608 | /* batman originator packet */ | 617 | /* batman originator packet */ |
609 | case BAT_PACKET: | 618 | case BAT_PACKET: |
610 | ret = recv_bat_packet(skb, batman_if); | 619 | ret = recv_bat_packet(skb, hard_iface); |
611 | break; | 620 | break; |
612 | 621 | ||
613 | /* batman icmp packet */ | 622 | /* batman icmp packet */ |
614 | case BAT_ICMP: | 623 | case BAT_ICMP: |
615 | ret = recv_icmp_packet(skb, batman_if); | 624 | ret = recv_icmp_packet(skb, hard_iface); |
616 | break; | 625 | break; |
617 | 626 | ||
618 | /* unicast packet */ | 627 | /* unicast packet */ |
619 | case BAT_UNICAST: | 628 | case BAT_UNICAST: |
620 | ret = recv_unicast_packet(skb, batman_if); | 629 | ret = recv_unicast_packet(skb, hard_iface); |
621 | break; | 630 | break; |
622 | 631 | ||
623 | /* fragmented unicast packet */ | 632 | /* fragmented unicast packet */ |
624 | case BAT_UNICAST_FRAG: | 633 | case BAT_UNICAST_FRAG: |
625 | ret = recv_ucast_frag_packet(skb, batman_if); | 634 | ret = recv_ucast_frag_packet(skb, hard_iface); |
626 | break; | 635 | break; |
627 | 636 | ||
628 | /* broadcast packet */ | 637 | /* broadcast packet */ |
629 | case BAT_BCAST: | 638 | case BAT_BCAST: |
630 | ret = recv_bcast_packet(skb, batman_if); | 639 | ret = recv_bcast_packet(skb, hard_iface); |
631 | break; | 640 | break; |
632 | 641 | ||
633 | /* vis packet */ | 642 | /* vis packet */ |
634 | case BAT_VIS: | 643 | case BAT_VIS: |
635 | ret = recv_vis_packet(skb, batman_if); | 644 | ret = recv_vis_packet(skb, hard_iface); |
636 | break; | 645 | break; |
637 | default: | 646 | default: |
638 | ret = NET_RX_DROP; | 647 | ret = NET_RX_DROP; |
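
Note how batman_skb_recv() never has to look up the receiving interface: every hard_iface embeds its own struct packet_type, so container_of() recovers the owning interface straight from the ptype argument before the packet-type switch dispatches the frame. A minimal sketch of that embedding trick; struct my_iface and my_rx() are hypothetical:

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_iface {
	struct packet_type ptype;	/* registered per interface */
	/* ... per-interface state ... */
};

static int my_rx(struct sk_buff *skb, struct net_device *dev,
		 struct packet_type *ptype, struct net_device *orig_dev)
{
	/* recover the interface that registered this packet_type */
	struct my_iface *iface = container_of(ptype, struct my_iface, ptype);

	/* ... dispatch on the packet type using iface's state ... */
	kfree_skb(skb);
	return NET_RX_DROP;
}
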
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h index ad195438428a..a9ddf36e51c8 100644 --- a/net/batman-adv/hard-interface.h +++ b/net/batman-adv/hard-interface.h | |||
@@ -31,19 +31,18 @@ | |||
31 | 31 | ||
32 | extern struct notifier_block hard_if_notifier; | 32 | extern struct notifier_block hard_if_notifier; |
33 | 33 | ||
34 | struct batman_if *get_batman_if_by_netdev(struct net_device *net_dev); | 34 | struct hard_iface *hardif_get_by_netdev(struct net_device *net_dev); |
35 | int hardif_enable_interface(struct batman_if *batman_if, char *iface_name); | 35 | int hardif_enable_interface(struct hard_iface *hard_iface, char *iface_name); |
36 | void hardif_disable_interface(struct batman_if *batman_if); | 36 | void hardif_disable_interface(struct hard_iface *hard_iface); |
37 | void hardif_remove_interfaces(void); | 37 | void hardif_remove_interfaces(void); |
38 | int hardif_min_mtu(struct net_device *soft_iface); | 38 | int hardif_min_mtu(struct net_device *soft_iface); |
39 | void update_min_mtu(struct net_device *soft_iface); | 39 | void update_min_mtu(struct net_device *soft_iface); |
40 | void hardif_free_rcu(struct rcu_head *rcu); | ||
40 | 41 | ||
41 | static inline void hardif_free_ref(struct kref *refcount) | 42 | static inline void hardif_free_ref(struct hard_iface *hard_iface) |
42 | { | 43 | { |
43 | struct batman_if *batman_if; | 44 | if (atomic_dec_and_test(&hard_iface->refcount)) |
44 | 45 | call_rcu(&hard_iface->rcu, hardif_free_rcu); | |
45 | batman_if = container_of(refcount, struct batman_if, refcount); | ||
46 | kfree(batman_if); | ||
47 | } | 46 | } |
48 | 47 | ||
49 | #endif /* _NET_BATMAN_ADV_HARD_INTERFACE_H_ */ | 48 | #endif /* _NET_BATMAN_ADV_HARD_INTERFACE_H_ */ |
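
The kref-based release is gone: hardif_free_ref() now pairs atomic_dec_and_test() with call_rcu(), so the kfree() is deferred past a grace period and lockless readers still walking hardif_list under rcu_read_lock() can never see the memory vanish beneath them. The generic shape of the pattern, with struct my_obj as a hypothetical stand-in:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <asm/atomic.h>

struct my_obj {
	atomic_t refcount;
	struct rcu_head rcu;
};

static void my_obj_free_rcu(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct my_obj, rcu));
}

static void my_obj_put(struct my_obj *obj)
{
	/* defer the free until all current RCU readers are done */
	if (atomic_dec_and_test(&obj->refcount))
		call_rcu(&obj->rcu, my_obj_free_rcu);
}
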
diff --git a/net/batman-adv/hash.c b/net/batman-adv/hash.c index fa2693973ab8..c5213d8f2cca 100644 --- a/net/batman-adv/hash.c +++ b/net/batman-adv/hash.c | |||
@@ -27,13 +27,16 @@ static void hash_init(struct hashtable_t *hash) | |||
27 | { | 27 | { |
28 | int i; | 28 | int i; |
29 | 29 | ||
30 | for (i = 0 ; i < hash->size; i++) | 30 | for (i = 0 ; i < hash->size; i++) { |
31 | INIT_HLIST_HEAD(&hash->table[i]); | 31 | INIT_HLIST_HEAD(&hash->table[i]); |
32 | spin_lock_init(&hash->list_locks[i]); | ||
33 | } | ||
32 | } | 34 | } |
33 | 35 | ||
34 | /* free only the hashtable and the hash itself. */ | 36 | /* free only the hashtable and the hash itself. */ |
35 | void hash_destroy(struct hashtable_t *hash) | 37 | void hash_destroy(struct hashtable_t *hash) |
36 | { | 38 | { |
39 | kfree(hash->list_locks); | ||
37 | kfree(hash->table); | 40 | kfree(hash->table); |
38 | kfree(hash); | 41 | kfree(hash); |
39 | } | 42 | } |
@@ -43,20 +46,25 @@ struct hashtable_t *hash_new(int size) | |||
43 | { | 46 | { |
44 | struct hashtable_t *hash; | 47 | struct hashtable_t *hash; |
45 | 48 | ||
46 | hash = kmalloc(sizeof(struct hashtable_t) , GFP_ATOMIC); | 49 | hash = kmalloc(sizeof(struct hashtable_t), GFP_ATOMIC); |
47 | |||
48 | if (!hash) | 50 | if (!hash) |
49 | return NULL; | 51 | return NULL; |
50 | 52 | ||
51 | hash->size = size; | ||
52 | hash->table = kmalloc(sizeof(struct element_t *) * size, GFP_ATOMIC); | 53 | hash->table = kmalloc(sizeof(struct element_t *) * size, GFP_ATOMIC); |
54 | if (!hash->table) | ||
55 | goto free_hash; | ||
53 | 56 | ||
54 | if (!hash->table) { | 57 | hash->list_locks = kmalloc(sizeof(spinlock_t) * size, GFP_ATOMIC); |
55 | kfree(hash); | 58 | if (!hash->list_locks) |
56 | return NULL; | 59 | goto free_table; |
57 | } | ||
58 | 60 | ||
61 | hash->size = size; | ||
59 | hash_init(hash); | 62 | hash_init(hash); |
60 | |||
61 | return hash; | 63 | return hash; |
64 | |||
65 | free_table: | ||
66 | kfree(hash->table); | ||
67 | free_hash: | ||
68 | kfree(hash); | ||
69 | return NULL; | ||
62 | } | 70 | } |
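
hash_new() now allocates one spinlock per bucket next to the bucket array and unwinds partial allocations through the goto labels, trading sizeof(spinlock_t) * size of extra memory for per-bucket write locking in place of the old table-wide orig_hash_lock. A hypothetical caller:

#include "hash.h"

static struct hashtable_t *my_table_setup(void)
{
	struct hashtable_t *hash;

	hash = hash_new(1024);	/* bucket array plus one lock per bucket */
	if (!hash)
		return NULL;	/* partial allocations already unwound */

	/* writers take hash->list_locks[index]; readers walk under RCU */
	return hash;
}
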
diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h index eae24402fd0a..434822b27473 100644 --- a/net/batman-adv/hash.h +++ b/net/batman-adv/hash.h | |||
@@ -28,21 +28,17 @@ | |||
28 | * compares the keys of 2 elements; | 28 | * compares the keys of 2 elements; |
29 | * returns non-0 if the keys match | 29 | * returns non-0 if the keys match |
30 | * and 0 otherwise */ | 30 | * and 0 otherwise */ |
31 | typedef int (*hashdata_compare_cb)(void *, void *); | 31 | typedef int (*hashdata_compare_cb)(struct hlist_node *, void *); |
32 | 32 | ||
33 | /* the hashfunction, should return an index | 33 | /* the hashfunction, should return an index |
34 | * based on the key in the data of the first | 34 | * based on the key in the data of the first |
35 | * argument and the size the second */ | 35 | * argument and the size the second */ |
36 | typedef int (*hashdata_choose_cb)(void *, int); | 36 | typedef int (*hashdata_choose_cb)(void *, int); |
37 | typedef void (*hashdata_free_cb)(void *, void *); | 37 | typedef void (*hashdata_free_cb)(struct hlist_node *, void *); |
38 | |||
39 | struct element_t { | ||
40 | void *data; /* pointer to the data */ | ||
41 | struct hlist_node hlist; /* bucket list pointer */ | ||
42 | }; | ||
43 | 38 | ||
44 | struct hashtable_t { | 39 | struct hashtable_t { |
45 | struct hlist_head *table; /* the hashtable itself, with the buckets */ | 40 | struct hlist_head *table; /* the hashtable itself with the buckets */ |
41 | spinlock_t *list_locks; /* spinlock for each hash list entry */ | ||
46 | int size; /* size of hashtable */ | 42 | int size; /* size of hashtable */ |
47 | }; | 43 | }; |
48 | 44 | ||
@@ -59,21 +55,22 @@ static inline void hash_delete(struct hashtable_t *hash, | |||
59 | hashdata_free_cb free_cb, void *arg) | 55 | hashdata_free_cb free_cb, void *arg) |
60 | { | 56 | { |
61 | struct hlist_head *head; | 57 | struct hlist_head *head; |
62 | struct hlist_node *walk, *safe; | 58 | struct hlist_node *node, *node_tmp; |
63 | struct element_t *bucket; | 59 | spinlock_t *list_lock; /* spinlock to protect write access */ |
64 | int i; | 60 | int i; |
65 | 61 | ||
66 | for (i = 0; i < hash->size; i++) { | 62 | for (i = 0; i < hash->size; i++) { |
67 | head = &hash->table[i]; | 63 | head = &hash->table[i]; |
64 | list_lock = &hash->list_locks[i]; | ||
68 | 65 | ||
69 | hlist_for_each_safe(walk, safe, head) { | 66 | spin_lock_bh(list_lock); |
70 | bucket = hlist_entry(walk, struct element_t, hlist); | 67 | hlist_for_each_safe(node, node_tmp, head) { |
71 | if (free_cb) | 68 | hlist_del_rcu(node); |
72 | free_cb(bucket->data, arg); | ||
73 | 69 | ||
74 | hlist_del(walk); | 70 | if (free_cb) |
75 | kfree(bucket); | 71 | free_cb(node, arg); |
76 | } | 72 | } |
73 | spin_unlock_bh(list_lock); | ||
77 | } | 74 | } |
78 | 75 | ||
79 | hash_destroy(hash); | 76 | hash_destroy(hash); |
@@ -82,35 +79,41 @@ static inline void hash_delete(struct hashtable_t *hash, | |||
82 | /* adds data to the hashtable. returns 0 on success, -1 on error */ | 79 | /* adds data to the hashtable. returns 0 on success, -1 on error */ |
83 | static inline int hash_add(struct hashtable_t *hash, | 80 | static inline int hash_add(struct hashtable_t *hash, |
84 | hashdata_compare_cb compare, | 81 | hashdata_compare_cb compare, |
85 | hashdata_choose_cb choose, void *data) | 82 | hashdata_choose_cb choose, |
83 | void *data, struct hlist_node *data_node) | ||
86 | { | 84 | { |
87 | int index; | 85 | int index; |
88 | struct hlist_head *head; | 86 | struct hlist_head *head; |
89 | struct hlist_node *walk, *safe; | 87 | struct hlist_node *node; |
90 | struct element_t *bucket; | 88 | spinlock_t *list_lock; /* spinlock to protect write access */ |
91 | 89 | ||
92 | if (!hash) | 90 | if (!hash) |
93 | return -1; | 91 | goto err; |
94 | 92 | ||
95 | index = choose(data, hash->size); | 93 | index = choose(data, hash->size); |
96 | head = &hash->table[index]; | 94 | head = &hash->table[index]; |
95 | list_lock = &hash->list_locks[index]; | ||
96 | |||
97 | rcu_read_lock(); | ||
98 | __hlist_for_each_rcu(node, head) { | ||
99 | if (!compare(node, data)) | ||
100 | continue; | ||
97 | 101 | ||
98 | hlist_for_each_safe(walk, safe, head) { | 102 | goto err_unlock; |
99 | bucket = hlist_entry(walk, struct element_t, hlist); | ||
100 | if (compare(bucket->data, data)) | ||
101 | return -1; | ||
102 | } | 103 | } |
104 | rcu_read_unlock(); | ||
103 | 105 | ||
104 | /* no duplicate found in list, add new element */ | 106 | /* no duplicate found in list, add new element */ |
105 | bucket = kmalloc(sizeof(struct element_t), GFP_ATOMIC); | 107 | spin_lock_bh(list_lock); |
106 | 108 | hlist_add_head_rcu(data_node, head); | |
107 | if (!bucket) | 109 | spin_unlock_bh(list_lock); |
108 | return -1; | ||
109 | |||
110 | bucket->data = data; | ||
111 | hlist_add_head(&bucket->hlist, head); | ||
112 | 110 | ||
113 | return 0; | 111 | return 0; |
112 | |||
113 | err_unlock: | ||
114 | rcu_read_unlock(); | ||
115 | err: | ||
116 | return -1; | ||
114 | } | 117 | } |
115 | 118 | ||
116 | /* removes data from hash, if found. returns pointer to data on success, so you | 119 |
@@ -122,50 +125,25 @@ static inline void *hash_remove(struct hashtable_t *hash, | |||
122 | hashdata_choose_cb choose, void *data) | 125 | hashdata_choose_cb choose, void *data) |
123 | { | 126 | { |
124 | size_t index; | 127 | size_t index; |
125 | struct hlist_node *walk; | 128 | struct hlist_node *node; |
126 | struct element_t *bucket; | ||
127 | struct hlist_head *head; | 129 | struct hlist_head *head; |
128 | void *data_save; | 130 | void *data_save = NULL; |
129 | 131 | ||
130 | index = choose(data, hash->size); | 132 | index = choose(data, hash->size); |
131 | head = &hash->table[index]; | 133 | head = &hash->table[index]; |
132 | 134 | ||
133 | hlist_for_each_entry(bucket, walk, head, hlist) { | 135 | spin_lock_bh(&hash->list_locks[index]); |
134 | if (compare(bucket->data, data)) { | 136 | hlist_for_each(node, head) { |
135 | data_save = bucket->data; | 137 | if (!compare(node, data)) |
136 | hlist_del(walk); | 138 | continue; |
137 | kfree(bucket); | ||
138 | return data_save; | ||
139 | } | ||
140 | } | ||
141 | |||
142 | return NULL; | ||
143 | } | ||
144 | |||
145 | /* finds data, based on the key in keydata. returns the found data on success, | ||
146 | * or NULL on error */ | ||
147 | static inline void *hash_find(struct hashtable_t *hash, | ||
148 | hashdata_compare_cb compare, | ||
149 | hashdata_choose_cb choose, void *keydata) | ||
150 | { | ||
151 | int index; | ||
152 | struct hlist_head *head; | ||
153 | struct hlist_node *walk; | ||
154 | struct element_t *bucket; | ||
155 | |||
156 | if (!hash) | ||
157 | return NULL; | ||
158 | |||
159 | index = choose(keydata , hash->size); | ||
160 | head = &hash->table[index]; | ||
161 | 139 | ||
162 | hlist_for_each(walk, head) { | 140 | data_save = node; |
163 | bucket = hlist_entry(walk, struct element_t, hlist); | 141 | hlist_del_rcu(node); |
164 | if (compare(bucket->data, keydata)) | 142 | break; |
165 | return bucket->data; | ||
166 | } | 143 | } |
144 | spin_unlock_bh(&hash->list_locks[index]); | ||
167 | 145 | ||
168 | return NULL; | 146 | return data_save; |
169 | } | 147 | } |
170 | 148 | ||
171 | #endif /* _NET_BATMAN_ADV_HASH_H_ */ | 149 | #endif /* _NET_BATMAN_ADV_HASH_H_ */ |
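
With struct element_t gone, every stored object embeds its own hlist_node, and the compare/free callbacks receive that node and map it back to the object via container_of(). A sketch of what a user of the reworked API supplies (struct my_entry, my_compare(), my_choose() and my_insert() are hypothetical); note that the compare callback returns non-0 on a match, as hash_add() above expects:

#include <linux/if_ether.h>
#include <linux/list.h>
#include <linux/string.h>
#include "hash.h"

struct my_entry {
	uint8_t key[ETH_ALEN];		/* key leads, as in struct orig_node */
	struct hlist_node hash_entry;	/* embedded; no separate bucket alloc */
};

static int my_compare(struct hlist_node *node, void *data)
{
	struct my_entry *entry = container_of(node, struct my_entry,
					      hash_entry);

	return memcmp(entry->key, ((struct my_entry *)data)->key,
		      ETH_ALEN) == 0;
}

static int my_choose(void *data, int size)
{
	/* trivial illustrative hash over the leading key byte */
	return ((struct my_entry *)data)->key[0] % size;
}

static int my_insert(struct hashtable_t *hash, struct my_entry *entry)
{
	/* pass the entry for compare/choose plus its embedded node */
	return hash_add(hash, my_compare, my_choose, entry,
			&entry->hash_entry);
}
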
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c index 319a7ccf6efa..34ce56c358e5 100644 --- a/net/batman-adv/icmp_socket.c +++ b/net/batman-adv/icmp_socket.c | |||
@@ -156,10 +156,9 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff, | |||
156 | struct sk_buff *skb; | 156 | struct sk_buff *skb; |
157 | struct icmp_packet_rr *icmp_packet; | 157 | struct icmp_packet_rr *icmp_packet; |
158 | 158 | ||
159 | struct orig_node *orig_node; | 159 | struct orig_node *orig_node = NULL; |
160 | struct batman_if *batman_if; | 160 | struct neigh_node *neigh_node = NULL; |
161 | size_t packet_len = sizeof(struct icmp_packet); | 161 | size_t packet_len = sizeof(struct icmp_packet); |
162 | uint8_t dstaddr[ETH_ALEN]; | ||
163 | 162 | ||
164 | if (len < sizeof(struct icmp_packet)) { | 163 | if (len < sizeof(struct icmp_packet)) { |
165 | bat_dbg(DBG_BATMAN, bat_priv, | 164 | bat_dbg(DBG_BATMAN, bat_priv, |
@@ -219,47 +218,52 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff, | |||
219 | if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE) | 218 | if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE) |
220 | goto dst_unreach; | 219 | goto dst_unreach; |
221 | 220 | ||
222 | spin_lock_bh(&bat_priv->orig_hash_lock); | 221 | rcu_read_lock(); |
223 | orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash, | 222 | orig_node = orig_hash_find(bat_priv, icmp_packet->dst); |
224 | compare_orig, choose_orig, | ||
225 | icmp_packet->dst)); | ||
226 | 223 | ||
227 | if (!orig_node) | 224 | if (!orig_node) |
228 | goto unlock; | 225 | goto unlock; |
229 | 226 | ||
230 | if (!orig_node->router) | 227 | neigh_node = orig_node->router; |
228 | |||
229 | if (!neigh_node) | ||
231 | goto unlock; | 230 | goto unlock; |
232 | 231 | ||
233 | batman_if = orig_node->router->if_incoming; | 232 | if (!atomic_inc_not_zero(&neigh_node->refcount)) { |
234 | memcpy(dstaddr, orig_node->router->addr, ETH_ALEN); | 233 | neigh_node = NULL; |
234 | goto unlock; | ||
235 | } | ||
235 | 236 | ||
236 | spin_unlock_bh(&bat_priv->orig_hash_lock); | 237 | rcu_read_unlock(); |
237 | 238 | ||
238 | if (!batman_if) | 239 | if (!neigh_node->if_incoming) |
239 | goto dst_unreach; | 240 | goto dst_unreach; |
240 | 241 | ||
241 | if (batman_if->if_status != IF_ACTIVE) | 242 | if (neigh_node->if_incoming->if_status != IF_ACTIVE) |
242 | goto dst_unreach; | 243 | goto dst_unreach; |
243 | 244 | ||
244 | memcpy(icmp_packet->orig, | 245 | memcpy(icmp_packet->orig, |
245 | bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN); | 246 | bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN); |
246 | 247 | ||
247 | if (packet_len == sizeof(struct icmp_packet_rr)) | 248 | if (packet_len == sizeof(struct icmp_packet_rr)) |
248 | memcpy(icmp_packet->rr, batman_if->net_dev->dev_addr, ETH_ALEN); | 249 | memcpy(icmp_packet->rr, |
249 | 250 | neigh_node->if_incoming->net_dev->dev_addr, ETH_ALEN); | |
250 | |||
251 | send_skb_packet(skb, batman_if, dstaddr); | ||
252 | 251 | ||
252 | send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); | ||
253 | goto out; | 253 | goto out; |
254 | 254 | ||
255 | unlock: | 255 | unlock: |
256 | spin_unlock_bh(&bat_priv->orig_hash_lock); | 256 | rcu_read_unlock(); |
257 | dst_unreach: | 257 | dst_unreach: |
258 | icmp_packet->msg_type = DESTINATION_UNREACHABLE; | 258 | icmp_packet->msg_type = DESTINATION_UNREACHABLE; |
259 | bat_socket_add_packet(socket_client, icmp_packet, packet_len); | 259 | bat_socket_add_packet(socket_client, icmp_packet, packet_len); |
260 | free_skb: | 260 | free_skb: |
261 | kfree_skb(skb); | 261 | kfree_skb(skb); |
262 | out: | 262 | out: |
263 | if (neigh_node) | ||
264 | neigh_node_free_ref(neigh_node); | ||
265 | if (orig_node) | ||
266 | orig_node_free_ref(orig_node); | ||
263 | return len; | 267 | return len; |
264 | } | 268 | } |
265 | 269 | ||
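
The rewritten bat_socket_write() shows the lookup-and-hold discipline used throughout this series: the router pointer is dereferenced only inside the RCU read side and is pinned with atomic_inc_not_zero(), so a neighbor whose count has already dropped to zero is never resurrected. A condensed sketch of that path; my_use_router() is a hypothetical wrapper around the calls shown above:

#include "types.h"
#include "originator.h"

static void my_use_router(struct bat_priv *bat_priv, uint8_t *dst)
{
	struct orig_node *orig_node;
	struct neigh_node *neigh_node = NULL;

	rcu_read_lock();
	orig_node = orig_hash_find(bat_priv, dst);	/* takes its own ref */
	if (orig_node)
		neigh_node = orig_node->router;

	/* pin the router only if it is not already being torn down */
	if (neigh_node && !atomic_inc_not_zero(&neigh_node->refcount))
		neigh_node = NULL;
	rcu_read_unlock();

	/* ... neigh_node, if non-NULL, is now safe to use ... */

	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (orig_node)
		orig_node_free_ref(orig_node);
}
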
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index 06d956c91c27..709b33bbdf43 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c | |||
@@ -33,7 +33,7 @@ | |||
33 | #include "vis.h" | 33 | #include "vis.h" |
34 | #include "hash.h" | 34 | #include "hash.h" |
35 | 35 | ||
36 | struct list_head if_list; | 36 | struct list_head hardif_list; |
37 | 37 | ||
38 | unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; | 38 | unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; |
39 | 39 | ||
@@ -41,7 +41,7 @@ struct workqueue_struct *bat_event_workqueue; | |||
41 | 41 | ||
42 | static int __init batman_init(void) | 42 | static int __init batman_init(void) |
43 | { | 43 | { |
44 | INIT_LIST_HEAD(&if_list); | 44 | INIT_LIST_HEAD(&hardif_list); |
45 | 45 | ||
46 | /* the name should not be longer than 10 chars - see | 46 | /* the name should not be longer than 10 chars - see |
47 | * http://lwn.net/Articles/23634/ */ | 47 | * http://lwn.net/Articles/23634/ */ |
@@ -79,7 +79,6 @@ int mesh_init(struct net_device *soft_iface) | |||
79 | { | 79 | { |
80 | struct bat_priv *bat_priv = netdev_priv(soft_iface); | 80 | struct bat_priv *bat_priv = netdev_priv(soft_iface); |
81 | 81 | ||
82 | spin_lock_init(&bat_priv->orig_hash_lock); | ||
83 | spin_lock_init(&bat_priv->forw_bat_list_lock); | 82 | spin_lock_init(&bat_priv->forw_bat_list_lock); |
84 | spin_lock_init(&bat_priv->forw_bcast_list_lock); | 83 | spin_lock_init(&bat_priv->forw_bcast_list_lock); |
85 | spin_lock_init(&bat_priv->hna_lhash_lock); | 84 | spin_lock_init(&bat_priv->hna_lhash_lock); |
@@ -154,14 +153,14 @@ void dec_module_count(void) | |||
154 | 153 | ||
155 | int is_my_mac(uint8_t *addr) | 154 | int is_my_mac(uint8_t *addr) |
156 | { | 155 | { |
157 | struct batman_if *batman_if; | 156 | struct hard_iface *hard_iface; |
158 | 157 | ||
159 | rcu_read_lock(); | 158 | rcu_read_lock(); |
160 | list_for_each_entry_rcu(batman_if, &if_list, list) { | 159 | list_for_each_entry_rcu(hard_iface, &hardif_list, list) { |
161 | if (batman_if->if_status != IF_ACTIVE) | 160 | if (hard_iface->if_status != IF_ACTIVE) |
162 | continue; | 161 | continue; |
163 | 162 | ||
164 | if (compare_orig(batman_if->net_dev->dev_addr, addr)) { | 163 | if (compare_eth(hard_iface->net_dev->dev_addr, addr)) { |
165 | rcu_read_unlock(); | 164 | rcu_read_unlock(); |
166 | return 1; | 165 | return 1; |
167 | } | 166 | } |
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index e235d7bbe045..dc248697de71 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h | |||
@@ -122,7 +122,7 @@ | |||
122 | #define REVISION_VERSION_STR " "REVISION_VERSION | 122 | #define REVISION_VERSION_STR " "REVISION_VERSION |
123 | #endif | 123 | #endif |
124 | 124 | ||
125 | extern struct list_head if_list; | 125 | extern struct list_head hardif_list; |
126 | 126 | ||
127 | extern unsigned char broadcast_addr[]; | 127 | extern unsigned char broadcast_addr[]; |
128 | extern struct workqueue_struct *bat_event_workqueue; | 128 | extern struct workqueue_struct *bat_event_workqueue; |
@@ -165,4 +165,14 @@ static inline void bat_dbg(char type __always_unused, | |||
165 | pr_err("%s: " fmt, _netdev->name, ## arg); \ | 165 | pr_err("%s: " fmt, _netdev->name, ## arg); \ |
166 | } while (0) | 166 | } while (0) |
167 | 167 | ||
168 | /** | ||
169 | * returns 1 if they are the same ethernet addr | ||
170 | * | ||
171 | * note: can't use compare_ether_addr() as it requires aligned memory | ||
172 | */ | ||
173 | static inline int compare_eth(void *data1, void *data2) | ||
174 | { | ||
175 | return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); | ||
176 | } | ||
177 | |||
168 | #endif /* _NET_BATMAN_ADV_MAIN_H_ */ | 178 | #endif /* _NET_BATMAN_ADV_MAIN_H_ */ |
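
compare_eth() is memcmp()-based on purpose: compare_ether_addr(), as the comment notes, reads both addresses as 16-bit words and therefore needs 2-byte alignment, which cannot be guaranteed for addresses sitting at arbitrary offsets inside packet buffers. An equivalent, alignment-safe helper; my_is_local_addr() is hypothetical:

#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>

static int my_is_local_addr(struct net_device *dev, uint8_t *addr)
{
	/* byte-wise compare: safe for any alignment of addr */
	return memcmp(dev->dev_addr, addr, ETH_ALEN) == 0;
}
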
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index 54863c9385de..0b9133022d2d 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c | |||
@@ -44,24 +44,36 @@ int originator_init(struct bat_priv *bat_priv) | |||
44 | if (bat_priv->orig_hash) | 44 | if (bat_priv->orig_hash) |
45 | return 1; | 45 | return 1; |
46 | 46 | ||
47 | spin_lock_bh(&bat_priv->orig_hash_lock); | ||
48 | bat_priv->orig_hash = hash_new(1024); | 47 | bat_priv->orig_hash = hash_new(1024); |
49 | 48 | ||
50 | if (!bat_priv->orig_hash) | 49 | if (!bat_priv->orig_hash) |
51 | goto err; | 50 | goto err; |
52 | 51 | ||
53 | spin_unlock_bh(&bat_priv->orig_hash_lock); | ||
54 | start_purge_timer(bat_priv); | 52 | start_purge_timer(bat_priv); |
55 | return 1; | 53 | return 1; |
56 | 54 | ||
57 | err: | 55 | err: |
58 | spin_unlock_bh(&bat_priv->orig_hash_lock); | ||
59 | return 0; | 56 | return 0; |
60 | } | 57 | } |
61 | 58 | ||
62 | struct neigh_node * | 59 | static void neigh_node_free_rcu(struct rcu_head *rcu) |
63 | create_neighbor(struct orig_node *orig_node, struct orig_node *orig_neigh_node, | 60 | { |
64 | uint8_t *neigh, struct batman_if *if_incoming) | 61 | struct neigh_node *neigh_node; |
62 | |||
63 | neigh_node = container_of(rcu, struct neigh_node, rcu); | ||
64 | kfree(neigh_node); | ||
65 | } | ||
66 | |||
67 | void neigh_node_free_ref(struct neigh_node *neigh_node) | ||
68 | { | ||
69 | if (atomic_dec_and_test(&neigh_node->refcount)) | ||
70 | call_rcu(&neigh_node->rcu, neigh_node_free_rcu); | ||
71 | } | ||
72 | |||
73 | struct neigh_node *create_neighbor(struct orig_node *orig_node, | ||
74 | struct orig_node *orig_neigh_node, | ||
75 | uint8_t *neigh, | ||
76 | struct hard_iface *if_incoming) | ||
65 | { | 77 | { |
66 | struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); | 78 | struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); |
67 | struct neigh_node *neigh_node; | 79 | struct neigh_node *neigh_node; |
@@ -73,50 +85,94 @@ create_neighbor(struct orig_node *orig_node, struct orig_node *orig_neigh_node, | |||
73 | if (!neigh_node) | 85 | if (!neigh_node) |
74 | return NULL; | 86 | return NULL; |
75 | 87 | ||
76 | INIT_LIST_HEAD(&neigh_node->list); | 88 | INIT_HLIST_NODE(&neigh_node->list); |
89 | INIT_LIST_HEAD(&neigh_node->bonding_list); | ||
77 | 90 | ||
78 | memcpy(neigh_node->addr, neigh, ETH_ALEN); | 91 | memcpy(neigh_node->addr, neigh, ETH_ALEN); |
79 | neigh_node->orig_node = orig_neigh_node; | 92 | neigh_node->orig_node = orig_neigh_node; |
80 | neigh_node->if_incoming = if_incoming; | 93 | neigh_node->if_incoming = if_incoming; |
81 | 94 | ||
82 | list_add_tail(&neigh_node->list, &orig_node->neigh_list); | 95 | /* extra reference for return */ |
96 | atomic_set(&neigh_node->refcount, 2); | ||
97 | |||
98 | spin_lock_bh(&orig_node->neigh_list_lock); | ||
99 | hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list); | ||
100 | spin_unlock_bh(&orig_node->neigh_list_lock); | ||
83 | return neigh_node; | 101 | return neigh_node; |
84 | } | 102 | } |
85 | 103 | ||
86 | static void free_orig_node(void *data, void *arg) | 104 | static void orig_node_free_rcu(struct rcu_head *rcu) |
87 | { | 105 | { |
88 | struct list_head *list_pos, *list_pos_tmp; | 106 | struct hlist_node *node, *node_tmp; |
89 | struct neigh_node *neigh_node; | 107 | struct neigh_node *neigh_node, *tmp_neigh_node; |
90 | struct orig_node *orig_node = (struct orig_node *)data; | 108 | struct orig_node *orig_node; |
91 | struct bat_priv *bat_priv = (struct bat_priv *)arg; | ||
92 | 109 | ||
93 | /* for all neighbors towards this originator ... */ | 110 | orig_node = container_of(rcu, struct orig_node, rcu); |
94 | list_for_each_safe(list_pos, list_pos_tmp, &orig_node->neigh_list) { | 111 | |
95 | neigh_node = list_entry(list_pos, struct neigh_node, list); | 112 | spin_lock_bh(&orig_node->neigh_list_lock); |
113 | |||
114 | /* for all bonding members ... */ | ||
115 | list_for_each_entry_safe(neigh_node, tmp_neigh_node, | ||
116 | &orig_node->bond_list, bonding_list) { | ||
117 | list_del_rcu(&neigh_node->bonding_list); | ||
118 | neigh_node_free_ref(neigh_node); | ||
119 | } | ||
96 | 120 | ||
97 | list_del(list_pos); | 121 | /* for all neighbors towards this originator ... */ |
98 | kfree(neigh_node); | 122 | hlist_for_each_entry_safe(neigh_node, node, node_tmp, |
123 | &orig_node->neigh_list, list) { | ||
124 | hlist_del_rcu(&neigh_node->list); | ||
125 | neigh_node_free_ref(neigh_node); | ||
99 | } | 126 | } |
100 | 127 | ||
128 | spin_unlock_bh(&orig_node->neigh_list_lock); | ||
129 | |||
101 | frag_list_free(&orig_node->frag_list); | 130 | frag_list_free(&orig_node->frag_list); |
102 | hna_global_del_orig(bat_priv, orig_node, "originator timed out"); | 131 | hna_global_del_orig(orig_node->bat_priv, orig_node, |
132 | "originator timed out"); | ||
103 | 133 | ||
104 | kfree(orig_node->bcast_own); | 134 | kfree(orig_node->bcast_own); |
105 | kfree(orig_node->bcast_own_sum); | 135 | kfree(orig_node->bcast_own_sum); |
106 | kfree(orig_node); | 136 | kfree(orig_node); |
107 | } | 137 | } |
108 | 138 | ||
139 | void orig_node_free_ref(struct orig_node *orig_node) | ||
140 | { | ||
141 | if (atomic_dec_and_test(&orig_node->refcount)) | ||
142 | call_rcu(&orig_node->rcu, orig_node_free_rcu); | ||
143 | } | ||
144 | |||
109 | void originator_free(struct bat_priv *bat_priv) | 145 | void originator_free(struct bat_priv *bat_priv) |
110 | { | 146 | { |
111 | if (!bat_priv->orig_hash) | 147 | struct hashtable_t *hash = bat_priv->orig_hash; |
148 | struct hlist_node *node, *node_tmp; | ||
149 | struct hlist_head *head; | ||
150 | spinlock_t *list_lock; /* spinlock to protect write access */ | ||
151 | struct orig_node *orig_node; | ||
152 | int i; | ||
153 | |||
154 | if (!hash) | ||
112 | return; | 155 | return; |
113 | 156 | ||
114 | cancel_delayed_work_sync(&bat_priv->orig_work); | 157 | cancel_delayed_work_sync(&bat_priv->orig_work); |
115 | 158 | ||
116 | spin_lock_bh(&bat_priv->orig_hash_lock); | ||
117 | hash_delete(bat_priv->orig_hash, free_orig_node, bat_priv); | ||
118 | bat_priv->orig_hash = NULL; | 159 | bat_priv->orig_hash = NULL; |
119 | spin_unlock_bh(&bat_priv->orig_hash_lock); | 160 | |
161 | for (i = 0; i < hash->size; i++) { | ||
162 | head = &hash->table[i]; | ||
163 | list_lock = &hash->list_locks[i]; | ||
164 | |||
165 | spin_lock_bh(list_lock); | ||
166 | hlist_for_each_entry_safe(orig_node, node, node_tmp, | ||
167 | head, hash_entry) { | ||
168 | |||
169 | hlist_del_rcu(node); | ||
170 | orig_node_free_ref(orig_node); | ||
171 | } | ||
172 | spin_unlock_bh(list_lock); | ||
173 | } | ||
174 | |||
175 | hash_destroy(hash); | ||
120 | } | 176 | } |
121 | 177 | ||
122 | /* this function finds or creates an originator entry for the given | 178 | /* this function finds or creates an originator entry for the given |
@@ -127,10 +183,7 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr) | |||
127 | int size; | 183 | int size; |
128 | int hash_added; | 184 | int hash_added; |
129 | 185 | ||
130 | orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash, | 186 | orig_node = orig_hash_find(bat_priv, addr); |
131 | compare_orig, choose_orig, | ||
132 | addr)); | ||
133 | |||
134 | if (orig_node) | 187 | if (orig_node) |
135 | return orig_node; | 188 | return orig_node; |
136 | 189 | ||
@@ -141,8 +194,16 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr) | |||
141 | if (!orig_node) | 194 | if (!orig_node) |
142 | return NULL; | 195 | return NULL; |
143 | 196 | ||
144 | INIT_LIST_HEAD(&orig_node->neigh_list); | 197 | INIT_HLIST_HEAD(&orig_node->neigh_list); |
198 | INIT_LIST_HEAD(&orig_node->bond_list); | ||
199 | spin_lock_init(&orig_node->ogm_cnt_lock); | ||
200 | spin_lock_init(&orig_node->bcast_seqno_lock); | ||
201 | spin_lock_init(&orig_node->neigh_list_lock); | ||
202 | |||
203 | /* extra reference for return */ | ||
204 | atomic_set(&orig_node->refcount, 2); | ||
145 | 205 | ||
206 | orig_node->bat_priv = bat_priv; | ||
146 | memcpy(orig_node->orig, addr, ETH_ALEN); | 207 | memcpy(orig_node->orig, addr, ETH_ALEN); |
147 | orig_node->router = NULL; | 208 | orig_node->router = NULL; |
148 | orig_node->hna_buff = NULL; | 209 | orig_node->hna_buff = NULL; |
@@ -151,6 +212,8 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr) | |||
151 | orig_node->batman_seqno_reset = jiffies - 1 | 212 | orig_node->batman_seqno_reset = jiffies - 1 |
152 | - msecs_to_jiffies(RESET_PROTECTION_MS); | 213 | - msecs_to_jiffies(RESET_PROTECTION_MS); |
153 | 214 | ||
215 | atomic_set(&orig_node->bond_candidates, 0); | ||
216 | |||
154 | size = bat_priv->num_ifaces * sizeof(unsigned long) * NUM_WORDS; | 217 | size = bat_priv->num_ifaces * sizeof(unsigned long) * NUM_WORDS; |
155 | 218 | ||
156 | orig_node->bcast_own = kzalloc(size, GFP_ATOMIC); | 219 | orig_node->bcast_own = kzalloc(size, GFP_ATOMIC); |
@@ -166,8 +229,8 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr) | |||
166 | if (!orig_node->bcast_own_sum) | 229 | if (!orig_node->bcast_own_sum) |
167 | goto free_bcast_own; | 230 | goto free_bcast_own; |
168 | 231 | ||
169 | hash_added = hash_add(bat_priv->orig_hash, compare_orig, choose_orig, | 232 | hash_added = hash_add(bat_priv->orig_hash, compare_orig, |
170 | orig_node); | 233 | choose_orig, orig_node, &orig_node->hash_entry); |
171 | if (hash_added < 0) | 234 | if (hash_added < 0) |
172 | goto free_bcast_own_sum; | 235 | goto free_bcast_own_sum; |
173 | 236 | ||
@@ -185,23 +248,30 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv, | |||
185 | struct orig_node *orig_node, | 248 | struct orig_node *orig_node, |
186 | struct neigh_node **best_neigh_node) | 249 | struct neigh_node **best_neigh_node) |
187 | { | 250 | { |
188 | struct list_head *list_pos, *list_pos_tmp; | 251 | struct hlist_node *node, *node_tmp; |
189 | struct neigh_node *neigh_node; | 252 | struct neigh_node *neigh_node; |
190 | bool neigh_purged = false; | 253 | bool neigh_purged = false; |
191 | 254 | ||
192 | *best_neigh_node = NULL; | 255 | *best_neigh_node = NULL; |
193 | 256 | ||
257 | spin_lock_bh(&orig_node->neigh_list_lock); | ||
258 | |||
194 | /* for all neighbors towards this originator ... */ | 259 | /* for all neighbors towards this originator ... */ |
195 | list_for_each_safe(list_pos, list_pos_tmp, &orig_node->neigh_list) { | 260 | hlist_for_each_entry_safe(neigh_node, node, node_tmp, |
196 | neigh_node = list_entry(list_pos, struct neigh_node, list); | 261 | &orig_node->neigh_list, list) { |
197 | 262 | ||
198 | if ((time_after(jiffies, | 263 | if ((time_after(jiffies, |
199 | neigh_node->last_valid + PURGE_TIMEOUT * HZ)) || | 264 | neigh_node->last_valid + PURGE_TIMEOUT * HZ)) || |
200 | (neigh_node->if_incoming->if_status == IF_INACTIVE) || | 265 | (neigh_node->if_incoming->if_status == IF_INACTIVE) || |
266 | (neigh_node->if_incoming->if_status == IF_NOT_IN_USE) || | ||
201 | (neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) { | 267 | (neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) { |
202 | 268 | ||
203 | if (neigh_node->if_incoming->if_status == | 269 | if ((neigh_node->if_incoming->if_status == |
204 | IF_TO_BE_REMOVED) | 270 | IF_INACTIVE) || |
271 | (neigh_node->if_incoming->if_status == | ||
272 | IF_NOT_IN_USE) || | ||
273 | (neigh_node->if_incoming->if_status == | ||
274 | IF_TO_BE_REMOVED)) | ||
205 | bat_dbg(DBG_BATMAN, bat_priv, | 275 | bat_dbg(DBG_BATMAN, bat_priv, |
206 | "neighbor purge: originator %pM, " | 276 | "neighbor purge: originator %pM, " |
207 | "neighbor: %pM, iface: %s\n", | 277 | "neighbor: %pM, iface: %s\n", |
@@ -215,14 +285,18 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv, | |||
215 | (neigh_node->last_valid / HZ)); | 285 | (neigh_node->last_valid / HZ)); |
216 | 286 | ||
217 | neigh_purged = true; | 287 | neigh_purged = true; |
218 | list_del(list_pos); | 288 | |
219 | kfree(neigh_node); | 289 | hlist_del_rcu(&neigh_node->list); |
290 | bonding_candidate_del(orig_node, neigh_node); | ||
291 | neigh_node_free_ref(neigh_node); | ||
220 | } else { | 292 | } else { |
221 | if ((!*best_neigh_node) || | 293 | if ((!*best_neigh_node) || |
222 | (neigh_node->tq_avg > (*best_neigh_node)->tq_avg)) | 294 | (neigh_node->tq_avg > (*best_neigh_node)->tq_avg)) |
223 | *best_neigh_node = neigh_node; | 295 | *best_neigh_node = neigh_node; |
224 | } | 296 | } |
225 | } | 297 | } |
298 | |||
299 | spin_unlock_bh(&orig_node->neigh_list_lock); | ||
226 | return neigh_purged; | 300 | return neigh_purged; |
227 | } | 301 | } |
228 | 302 | ||
@@ -245,9 +319,6 @@ static bool purge_orig_node(struct bat_priv *bat_priv, | |||
245 | best_neigh_node, | 319 | best_neigh_node, |
246 | orig_node->hna_buff, | 320 | orig_node->hna_buff, |
247 | orig_node->hna_buff_len); | 321 | orig_node->hna_buff_len); |
248 | /* update bonding candidates, we could have lost | ||
249 | * some candidates. */ | ||
250 | update_bonding_candidates(orig_node); | ||
251 | } | 322 | } |
252 | } | 323 | } |
253 | 324 | ||
@@ -257,40 +328,38 @@ static bool purge_orig_node(struct bat_priv *bat_priv, | |||
257 | static void _purge_orig(struct bat_priv *bat_priv) | 328 | static void _purge_orig(struct bat_priv *bat_priv) |
258 | { | 329 | { |
259 | struct hashtable_t *hash = bat_priv->orig_hash; | 330 | struct hashtable_t *hash = bat_priv->orig_hash; |
260 | struct hlist_node *walk, *safe; | 331 | struct hlist_node *node, *node_tmp; |
261 | struct hlist_head *head; | 332 | struct hlist_head *head; |
262 | struct element_t *bucket; | 333 | spinlock_t *list_lock; /* spinlock to protect write access */ |
263 | struct orig_node *orig_node; | 334 | struct orig_node *orig_node; |
264 | int i; | 335 | int i; |
265 | 336 | ||
266 | if (!hash) | 337 | if (!hash) |
267 | return; | 338 | return; |
268 | 339 | ||
269 | spin_lock_bh(&bat_priv->orig_hash_lock); | ||
270 | |||
271 | /* for all origins... */ | 340 | /* for all origins... */ |
272 | for (i = 0; i < hash->size; i++) { | 341 | for (i = 0; i < hash->size; i++) { |
273 | head = &hash->table[i]; | 342 | head = &hash->table[i]; |
343 | list_lock = &hash->list_locks[i]; | ||
274 | 344 | ||
275 | hlist_for_each_entry_safe(bucket, walk, safe, head, hlist) { | 345 | spin_lock_bh(list_lock); |
276 | orig_node = bucket->data; | 346 | hlist_for_each_entry_safe(orig_node, node, node_tmp, |
277 | 347 | head, hash_entry) { | |
278 | if (purge_orig_node(bat_priv, orig_node)) { | 348 | if (purge_orig_node(bat_priv, orig_node)) { |
279 | if (orig_node->gw_flags) | 349 | if (orig_node->gw_flags) |
280 | gw_node_delete(bat_priv, orig_node); | 350 | gw_node_delete(bat_priv, orig_node); |
281 | hlist_del(walk); | 351 | hlist_del_rcu(node); |
282 | kfree(bucket); | 352 | orig_node_free_ref(orig_node); |
283 | free_orig_node(orig_node, bat_priv); | 353 | continue; |
284 | } | 354 | } |
285 | 355 | ||
286 | if (time_after(jiffies, orig_node->last_frag_packet + | 356 | if (time_after(jiffies, orig_node->last_frag_packet + |
287 | msecs_to_jiffies(FRAG_TIMEOUT))) | 357 | msecs_to_jiffies(FRAG_TIMEOUT))) |
288 | frag_list_free(&orig_node->frag_list); | 358 | frag_list_free(&orig_node->frag_list); |
289 | } | 359 | } |
360 | spin_unlock_bh(list_lock); | ||
290 | } | 361 | } |
291 | 362 | ||
292 | spin_unlock_bh(&bat_priv->orig_hash_lock); | ||
293 | |||
294 | gw_node_purge(bat_priv); | 363 | gw_node_purge(bat_priv); |
295 | gw_election(bat_priv); | 364 | gw_election(bat_priv); |
296 | 365 | ||
@@ -318,9 +387,8 @@ int orig_seq_print_text(struct seq_file *seq, void *offset) | |||
318 | struct net_device *net_dev = (struct net_device *)seq->private; | 387 | struct net_device *net_dev = (struct net_device *)seq->private; |
319 | struct bat_priv *bat_priv = netdev_priv(net_dev); | 388 | struct bat_priv *bat_priv = netdev_priv(net_dev); |
320 | struct hashtable_t *hash = bat_priv->orig_hash; | 389 | struct hashtable_t *hash = bat_priv->orig_hash; |
321 | struct hlist_node *walk; | 390 | struct hlist_node *node, *node_tmp; |
322 | struct hlist_head *head; | 391 | struct hlist_head *head; |
323 | struct element_t *bucket; | ||
324 | struct orig_node *orig_node; | 392 | struct orig_node *orig_node; |
325 | struct neigh_node *neigh_node; | 393 | struct neigh_node *neigh_node; |
326 | int batman_count = 0; | 394 | int batman_count = 0; |
@@ -348,14 +416,11 @@ int orig_seq_print_text(struct seq_file *seq, void *offset) | |||
348 | "Originator", "last-seen", "#", TQ_MAX_VALUE, "Nexthop", | 416 | "Originator", "last-seen", "#", TQ_MAX_VALUE, "Nexthop", |
349 | "outgoingIF", "Potential nexthops"); | 417 | "outgoingIF", "Potential nexthops"); |
350 | 418 | ||
351 | spin_lock_bh(&bat_priv->orig_hash_lock); | ||
352 | |||
353 | for (i = 0; i < hash->size; i++) { | 419 | for (i = 0; i < hash->size; i++) { |
354 | head = &hash->table[i]; | 420 | head = &hash->table[i]; |
355 | 421 | ||
356 | hlist_for_each_entry(bucket, walk, head, hlist) { | 422 | rcu_read_lock(); |
357 | orig_node = bucket->data; | 423 | hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { |
358 | |||
359 | if (!orig_node->router) | 424 | if (!orig_node->router) |
360 | continue; | 425 | continue; |
361 | 426 | ||
@@ -374,8 +439,8 @@ int orig_seq_print_text(struct seq_file *seq, void *offset) | |||
374 | neigh_node->addr, | 439 | neigh_node->addr, |
375 | neigh_node->if_incoming->net_dev->name); | 440 | neigh_node->if_incoming->net_dev->name); |
376 | 441 | ||
377 | list_for_each_entry(neigh_node, &orig_node->neigh_list, | 442 | hlist_for_each_entry_rcu(neigh_node, node_tmp, |
378 | list) { | 443 | &orig_node->neigh_list, list) { |
379 | seq_printf(seq, " %pM (%3i)", neigh_node->addr, | 444 | seq_printf(seq, " %pM (%3i)", neigh_node->addr, |
380 | neigh_node->tq_avg); | 445 | neigh_node->tq_avg); |
381 | } | 446 | } |
@@ -383,10 +448,9 @@ int orig_seq_print_text(struct seq_file *seq, void *offset) | |||
383 | seq_printf(seq, "\n"); | 448 | seq_printf(seq, "\n"); |
384 | batman_count++; | 449 | batman_count++; |
385 | } | 450 | } |
451 | rcu_read_unlock(); | ||
386 | } | 452 | } |
387 | 453 | ||
388 | spin_unlock_bh(&bat_priv->orig_hash_lock); | ||
389 | |||
390 | if ((batman_count == 0)) | 454 | if ((batman_count == 0)) |
391 | seq_printf(seq, "No batman nodes in range ...\n"); | 455 | seq_printf(seq, "No batman nodes in range ...\n"); |
392 | 456 | ||
@@ -423,36 +487,36 @@ static int orig_node_add_if(struct orig_node *orig_node, int max_if_num) | |||
423 | return 0; | 487 | return 0; |
424 | } | 488 | } |
425 | 489 | ||
426 | int orig_hash_add_if(struct batman_if *batman_if, int max_if_num) | 490 | int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num) |
427 | { | 491 | { |
428 | struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface); | 492 | struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); |
429 | struct hashtable_t *hash = bat_priv->orig_hash; | 493 | struct hashtable_t *hash = bat_priv->orig_hash; |
430 | struct hlist_node *walk; | 494 | struct hlist_node *node; |
431 | struct hlist_head *head; | 495 | struct hlist_head *head; |
432 | struct element_t *bucket; | ||
433 | struct orig_node *orig_node; | 496 | struct orig_node *orig_node; |
434 | int i; | 497 | int i, ret; |
435 | 498 | ||
436 | /* resize all orig nodes because orig_node->bcast_own(_sum) depend on | 499 | /* resize all orig nodes because orig_node->bcast_own(_sum) depend on |
437 | * if_num */ | 500 | * if_num */ |
438 | spin_lock_bh(&bat_priv->orig_hash_lock); | ||
439 | |||
440 | for (i = 0; i < hash->size; i++) { | 501 | for (i = 0; i < hash->size; i++) { |
441 | head = &hash->table[i]; | 502 | head = &hash->table[i]; |
442 | 503 | ||
443 | hlist_for_each_entry(bucket, walk, head, hlist) { | 504 | rcu_read_lock(); |
444 | orig_node = bucket->data; | 505 | hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { |
506 | spin_lock_bh(&orig_node->ogm_cnt_lock); | ||
507 | ret = orig_node_add_if(orig_node, max_if_num); | ||
508 | spin_unlock_bh(&orig_node->ogm_cnt_lock); | ||
445 | 509 | ||
446 | if (orig_node_add_if(orig_node, max_if_num) == -1) | 510 | if (ret == -1) |
447 | goto err; | 511 | goto err; |
448 | } | 512 | } |
513 | rcu_read_unlock(); | ||
449 | } | 514 | } |
450 | 515 | ||
451 | spin_unlock_bh(&bat_priv->orig_hash_lock); | ||
452 | return 0; | 516 | return 0; |
453 | 517 | ||
454 | err: | 518 | err: |
455 | spin_unlock_bh(&bat_priv->orig_hash_lock); | 519 | rcu_read_unlock(); |
456 | return -ENOMEM; | 520 | return -ENOMEM; |
457 | } | 521 | } |
458 | 522 | ||
@@ -508,57 +572,55 @@ free_own_sum: | |||
508 | return 0; | 572 | return 0; |
509 | } | 573 | } |
510 | 574 | ||
511 | int orig_hash_del_if(struct batman_if *batman_if, int max_if_num) | 575 | int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num) |
512 | { | 576 | { |
513 | struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface); | 577 | struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); |
514 | struct hashtable_t *hash = bat_priv->orig_hash; | 578 | struct hashtable_t *hash = bat_priv->orig_hash; |
515 | struct hlist_node *walk; | 579 | struct hlist_node *node; |
516 | struct hlist_head *head; | 580 | struct hlist_head *head; |
517 | struct element_t *bucket; | 581 | struct hard_iface *hard_iface_tmp; |
518 | struct batman_if *batman_if_tmp; | ||
519 | struct orig_node *orig_node; | 582 | struct orig_node *orig_node; |
520 | int i, ret; | 583 | int i, ret; |
521 | 584 | ||
522 | /* resize all orig nodes because orig_node->bcast_own(_sum) depend on | 585 | /* resize all orig nodes because orig_node->bcast_own(_sum) depend on |
523 | * if_num */ | 586 | * if_num */ |
524 | spin_lock_bh(&bat_priv->orig_hash_lock); | ||
525 | |||
526 | for (i = 0; i < hash->size; i++) { | 587 | for (i = 0; i < hash->size; i++) { |
527 | head = &hash->table[i]; | 588 | head = &hash->table[i]; |
528 | 589 | ||
529 | hlist_for_each_entry(bucket, walk, head, hlist) { | 590 | rcu_read_lock(); |
530 | orig_node = bucket->data; | 591 | hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { |
531 | 592 | spin_lock_bh(&orig_node->ogm_cnt_lock); | |
532 | ret = orig_node_del_if(orig_node, max_if_num, | 593 | ret = orig_node_del_if(orig_node, max_if_num, |
533 | batman_if->if_num); | 594 | hard_iface->if_num); |
595 | spin_unlock_bh(&orig_node->ogm_cnt_lock); | ||
534 | 596 | ||
535 | if (ret == -1) | 597 | if (ret == -1) |
536 | goto err; | 598 | goto err; |
537 | } | 599 | } |
600 | rcu_read_unlock(); | ||
538 | } | 601 | } |
539 | 602 | ||
540 | /* renumber remaining batman interfaces _inside_ of orig_hash_lock */ | 603 | /* renumber remaining batman interfaces _inside_ of orig_hash_lock */ |
541 | rcu_read_lock(); | 604 | rcu_read_lock(); |
542 | list_for_each_entry_rcu(batman_if_tmp, &if_list, list) { | 605 | list_for_each_entry_rcu(hard_iface_tmp, &hardif_list, list) { |
543 | if (batman_if_tmp->if_status == IF_NOT_IN_USE) | 606 | if (hard_iface_tmp->if_status == IF_NOT_IN_USE) |
544 | continue; | 607 | continue; |
545 | 608 | ||
546 | if (batman_if == batman_if_tmp) | 609 | if (hard_iface == hard_iface_tmp) |
547 | continue; | 610 | continue; |
548 | 611 | ||
549 | if (batman_if->soft_iface != batman_if_tmp->soft_iface) | 612 | if (hard_iface->soft_iface != hard_iface_tmp->soft_iface) |
550 | continue; | 613 | continue; |
551 | 614 | ||
552 | if (batman_if_tmp->if_num > batman_if->if_num) | 615 | if (hard_iface_tmp->if_num > hard_iface->if_num) |
553 | batman_if_tmp->if_num--; | 616 | hard_iface_tmp->if_num--; |
554 | } | 617 | } |
555 | rcu_read_unlock(); | 618 | rcu_read_unlock(); |
556 | 619 | ||
557 | batman_if->if_num = -1; | 620 | hard_iface->if_num = -1; |
558 | spin_unlock_bh(&bat_priv->orig_hash_lock); | ||
559 | return 0; | 621 | return 0; |
560 | 622 | ||
561 | err: | 623 | err: |
562 | spin_unlock_bh(&bat_priv->orig_hash_lock); | 624 | rcu_read_unlock(); |
563 | return -ENOMEM; | 625 | return -ENOMEM; |
564 | } | 626 | } |
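
A constructor idiom recurs in the rewritten originator code: create_neighbor() and get_orig_node() both start the refcount at 2, one reference owned by the RCU list or hash the object is published on and one handed back to the caller (the "extra reference for return" comments above), so the returned pointer stays valid even if a purge runs immediately. The generic shape, with hypothetical names:

#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>

struct my_obj {
	struct hlist_node node;
	atomic_t refcount;
};

static HLIST_HEAD(my_list);
static DEFINE_SPINLOCK(my_list_lock);

static struct my_obj *my_obj_new(void)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_ATOMIC);

	if (!obj)
		return NULL;

	/* one reference for the list below, one for the caller */
	atomic_set(&obj->refcount, 2);

	spin_lock_bh(&my_list_lock);
	hlist_add_head_rcu(&obj->node, &my_list);
	spin_unlock_bh(&my_list_lock);

	return obj;	/* caller drops its reference when done */
}
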
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h index 8019fbddffd0..5cc011057da1 100644 --- a/net/batman-adv/originator.h +++ b/net/batman-adv/originator.h | |||
@@ -22,21 +22,28 @@ | |||
22 | #ifndef _NET_BATMAN_ADV_ORIGINATOR_H_ | 22 | #ifndef _NET_BATMAN_ADV_ORIGINATOR_H_ |
23 | #define _NET_BATMAN_ADV_ORIGINATOR_H_ | 23 | #define _NET_BATMAN_ADV_ORIGINATOR_H_ |
24 | 24 | ||
25 | #include "hash.h" | ||
26 | |||
25 | int originator_init(struct bat_priv *bat_priv); | 27 | int originator_init(struct bat_priv *bat_priv); |
26 | void originator_free(struct bat_priv *bat_priv); | 28 | void originator_free(struct bat_priv *bat_priv); |
27 | void purge_orig_ref(struct bat_priv *bat_priv); | 29 | void purge_orig_ref(struct bat_priv *bat_priv); |
30 | void orig_node_free_ref(struct orig_node *orig_node); | ||
28 | struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr); | 31 | struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr); |
29 | struct neigh_node * | 32 | struct neigh_node *create_neighbor(struct orig_node *orig_node, |
30 | create_neighbor(struct orig_node *orig_node, struct orig_node *orig_neigh_node, | 33 | struct orig_node *orig_neigh_node, |
31 | uint8_t *neigh, struct batman_if *if_incoming); | 34 | uint8_t *neigh, |
35 | struct hard_iface *if_incoming); | ||
36 | void neigh_node_free_ref(struct neigh_node *neigh_node); | ||
32 | int orig_seq_print_text(struct seq_file *seq, void *offset); | 37 | int orig_seq_print_text(struct seq_file *seq, void *offset); |
33 | int orig_hash_add_if(struct batman_if *batman_if, int max_if_num); | 38 | int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num); |
34 | int orig_hash_del_if(struct batman_if *batman_if, int max_if_num); | 39 | int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num); |
35 | 40 | ||
36 | 41 | ||
37 | /* returns 1 if they are the same originator */ | 42 | /* returns 1 if they are the same originator */ |
38 | static inline int compare_orig(void *data1, void *data2) | 43 | static inline int compare_orig(struct hlist_node *node, void *data2) |
39 | { | 44 | { |
45 | void *data1 = container_of(node, struct orig_node, hash_entry); | ||
46 | |||
40 | return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); | 47 | return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); |
41 | } | 48 | } |
42 | 49 | ||
@@ -61,4 +68,35 @@ static inline int choose_orig(void *data, int32_t size) | |||
61 | return hash % size; | 68 | return hash % size; |
62 | } | 69 | } |
63 | 70 | ||
71 | static inline struct orig_node *orig_hash_find(struct bat_priv *bat_priv, | ||
72 | void *data) | ||
73 | { | ||
74 | struct hashtable_t *hash = bat_priv->orig_hash; | ||
75 | struct hlist_head *head; | ||
76 | struct hlist_node *node; | ||
77 | struct orig_node *orig_node, *orig_node_tmp = NULL; | ||
78 | int index; | ||
79 | |||
80 | if (!hash) | ||
81 | return NULL; | ||
82 | |||
83 | index = choose_orig(data, hash->size); | ||
84 | head = &hash->table[index]; | ||
85 | |||
86 | rcu_read_lock(); | ||
87 | hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { | ||
88 | if (!compare_eth(orig_node, data)) | ||
89 | continue; | ||
90 | |||
91 | if (!atomic_inc_not_zero(&orig_node->refcount)) | ||
92 | continue; | ||
93 | |||
94 | orig_node_tmp = orig_node; | ||
95 | break; | ||
96 | } | ||
97 | rcu_read_unlock(); | ||
98 | |||
99 | return orig_node_tmp; | ||
100 | } | ||
101 | |||
64 | #endif /* _NET_BATMAN_ADV_ORIGINATOR_H_ */ | 102 | #endif /* _NET_BATMAN_ADV_ORIGINATOR_H_ */ |
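
orig_hash_find() hands back its result with a reference already taken via atomic_inc_not_zero(), so a non-NULL return may be used outside the RCU section but must be released with orig_node_free_ref(). A usage sketch modeled on the bat_socket_write() changes; my_orig_exists() is hypothetical:

#include <linux/types.h>
#include "originator.h"

static bool my_orig_exists(struct bat_priv *bat_priv, uint8_t *addr)
{
	struct orig_node *orig_node = orig_hash_find(bat_priv, addr);

	if (!orig_node)
		return false;

	/* ... inspect orig_node safely, no rcu_read_lock() needed ... */
	orig_node_free_ref(orig_node);	/* drop the reference taken for us */
	return true;
}
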
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index 827414067e46..c172f5d0e05a 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c | |||
@@ -35,35 +35,33 @@ | |||
35 | #include "gateway_client.h" | 35 | #include "gateway_client.h" |
36 | #include "unicast.h" | 36 | #include "unicast.h" |
37 | 37 | ||
38 | void slide_own_bcast_window(struct batman_if *batman_if) | 38 | void slide_own_bcast_window(struct hard_iface *hard_iface) |
39 | { | 39 | { |
40 | struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface); | 40 | struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); |
41 | struct hashtable_t *hash = bat_priv->orig_hash; | 41 | struct hashtable_t *hash = bat_priv->orig_hash; |
42 | struct hlist_node *walk; | 42 | struct hlist_node *node; |
43 | struct hlist_head *head; | 43 | struct hlist_head *head; |
44 | struct element_t *bucket; | ||
45 | struct orig_node *orig_node; | 44 | struct orig_node *orig_node; |
46 | unsigned long *word; | 45 | unsigned long *word; |
47 | int i; | 46 | int i; |
48 | size_t word_index; | 47 | size_t word_index; |
49 | 48 | ||
50 | spin_lock_bh(&bat_priv->orig_hash_lock); | ||
51 | |||
52 | for (i = 0; i < hash->size; i++) { | 49 | for (i = 0; i < hash->size; i++) { |
53 | head = &hash->table[i]; | 50 | head = &hash->table[i]; |
54 | 51 | ||
55 | hlist_for_each_entry(bucket, walk, head, hlist) { | 52 | rcu_read_lock(); |
56 | orig_node = bucket->data; | 53 | hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { |
57 | word_index = batman_if->if_num * NUM_WORDS; | 54 | spin_lock_bh(&orig_node->ogm_cnt_lock); |
55 | word_index = hard_iface->if_num * NUM_WORDS; | ||
58 | word = &(orig_node->bcast_own[word_index]); | 56 | word = &(orig_node->bcast_own[word_index]); |
59 | 57 | ||
60 | bit_get_packet(bat_priv, word, 1, 0); | 58 | bit_get_packet(bat_priv, word, 1, 0); |
61 | orig_node->bcast_own_sum[batman_if->if_num] = | 59 | orig_node->bcast_own_sum[hard_iface->if_num] = |
62 | bit_packet_count(word); | 60 | bit_packet_count(word); |
61 | spin_unlock_bh(&orig_node->ogm_cnt_lock); | ||
63 | } | 62 | } |
63 | rcu_read_unlock(); | ||
64 | } | 64 | } |
65 | |||
66 | spin_unlock_bh(&bat_priv->orig_hash_lock); | ||
67 | } | 65 | } |
68 | 66 | ||
69 | static void update_HNA(struct bat_priv *bat_priv, struct orig_node *orig_node, | 67 | static void update_HNA(struct bat_priv *bat_priv, struct orig_node *orig_node, |
@@ -89,6 +87,8 @@ static void update_route(struct bat_priv *bat_priv, | |||
89 | struct neigh_node *neigh_node, | 87 | struct neigh_node *neigh_node, |
90 | unsigned char *hna_buff, int hna_buff_len) | 88 | unsigned char *hna_buff, int hna_buff_len) |
91 | { | 89 | { |
90 | struct neigh_node *neigh_node_tmp; | ||
91 | |||
92 | /* route deleted */ | 92 | /* route deleted */ |
93 | if ((orig_node->router) && (!neigh_node)) { | 93 | if ((orig_node->router) && (!neigh_node)) { |
94 | 94 | ||
@@ -115,7 +115,12 @@ static void update_route(struct bat_priv *bat_priv, | |||
115 | orig_node->router->addr); | 115 | orig_node->router->addr); |
116 | } | 116 | } |
117 | 117 | ||
118 | if (neigh_node && !atomic_inc_not_zero(&neigh_node->refcount)) | ||
119 | neigh_node = NULL; | ||
120 | neigh_node_tmp = orig_node->router; | ||
118 | orig_node->router = neigh_node; | 121 | orig_node->router = neigh_node; |
122 | if (neigh_node_tmp) | ||
123 | neigh_node_free_ref(neigh_node_tmp); | ||
119 | } | 124 | } |
120 | 125 | ||
121 | 126 | ||
@@ -138,73 +143,93 @@ void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node, | |||
138 | static int is_bidirectional_neigh(struct orig_node *orig_node, | 143 | static int is_bidirectional_neigh(struct orig_node *orig_node, |
139 | struct orig_node *orig_neigh_node, | 144 | struct orig_node *orig_neigh_node, |
140 | struct batman_packet *batman_packet, | 145 | struct batman_packet *batman_packet, |
141 | struct batman_if *if_incoming) | 146 | struct hard_iface *if_incoming) |
142 | { | 147 | { |
143 | struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); | 148 | struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); |
144 | struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL; | 149 | struct neigh_node *neigh_node = NULL, *tmp_neigh_node; |
150 | struct hlist_node *node; | ||
145 | unsigned char total_count; | 151 | unsigned char total_count; |
152 | uint8_t orig_eq_count, neigh_rq_count, tq_own; | ||
153 | int tq_asym_penalty, ret = 0; | ||
146 | 154 | ||
147 | if (orig_node == orig_neigh_node) { | 155 | if (orig_node == orig_neigh_node) { |
148 | list_for_each_entry(tmp_neigh_node, | 156 | rcu_read_lock(); |
149 | &orig_node->neigh_list, | 157 | hlist_for_each_entry_rcu(tmp_neigh_node, node, |
150 | list) { | 158 | &orig_node->neigh_list, list) { |
151 | 159 | ||
152 | if (compare_orig(tmp_neigh_node->addr, | 160 | if (!compare_eth(tmp_neigh_node->addr, |
153 | orig_neigh_node->orig) && | 161 | orig_neigh_node->orig)) |
154 | (tmp_neigh_node->if_incoming == if_incoming)) | 162 | continue; |
155 | neigh_node = tmp_neigh_node; | 163 | |
164 | if (tmp_neigh_node->if_incoming != if_incoming) | ||
165 | continue; | ||
166 | |||
167 | if (!atomic_inc_not_zero(&tmp_neigh_node->refcount)) | ||
168 | continue; | ||
169 | |||
170 | neigh_node = tmp_neigh_node; | ||
156 | } | 171 | } |
172 | rcu_read_unlock(); | ||
157 | 173 | ||
158 | if (!neigh_node) | 174 | if (!neigh_node) |
159 | neigh_node = create_neighbor(orig_node, | 175 | neigh_node = create_neighbor(orig_node, |
160 | orig_neigh_node, | 176 | orig_neigh_node, |
161 | orig_neigh_node->orig, | 177 | orig_neigh_node->orig, |
162 | if_incoming); | 178 | if_incoming); |
163 | /* create_neighbor failed, return 0 */ | ||
164 | if (!neigh_node) | 179 | if (!neigh_node) |
165 | return 0; | 180 | goto out; |
166 | 181 | ||
167 | neigh_node->last_valid = jiffies; | 182 | neigh_node->last_valid = jiffies; |
168 | } else { | 183 | } else { |
169 | /* find packet count of corresponding one hop neighbor */ | 184 | /* find packet count of corresponding one hop neighbor */ |
170 | list_for_each_entry(tmp_neigh_node, | 185 | rcu_read_lock(); |
171 | &orig_neigh_node->neigh_list, list) { | 186 | hlist_for_each_entry_rcu(tmp_neigh_node, node, |
187 | &orig_neigh_node->neigh_list, list) { | ||
188 | |||
189 | if (!compare_eth(tmp_neigh_node->addr, | ||
190 | orig_neigh_node->orig)) | ||
191 | continue; | ||
192 | |||
193 | if (tmp_neigh_node->if_incoming != if_incoming) | ||
194 | continue; | ||
172 | 195 | ||
173 | if (compare_orig(tmp_neigh_node->addr, | 196 | if (!atomic_inc_not_zero(&tmp_neigh_node->refcount)) |
174 | orig_neigh_node->orig) && | 197 | continue; |
175 | (tmp_neigh_node->if_incoming == if_incoming)) | 198 | |
176 | neigh_node = tmp_neigh_node; | 199 | neigh_node = tmp_neigh_node; |
177 | } | 200 | } |
201 | rcu_read_unlock(); | ||
178 | 202 | ||
179 | if (!neigh_node) | 203 | if (!neigh_node) |
180 | neigh_node = create_neighbor(orig_neigh_node, | 204 | neigh_node = create_neighbor(orig_neigh_node, |
181 | orig_neigh_node, | 205 | orig_neigh_node, |
182 | orig_neigh_node->orig, | 206 | orig_neigh_node->orig, |
183 | if_incoming); | 207 | if_incoming); |
184 | /* create_neighbor failed, return 0 */ | ||
185 | if (!neigh_node) | 208 | if (!neigh_node) |
186 | return 0; | 209 | goto out; |
187 | } | 210 | } |
188 | 211 | ||
189 | orig_node->last_valid = jiffies; | 212 | orig_node->last_valid = jiffies; |
190 | 213 | ||
214 | spin_lock_bh(&orig_node->ogm_cnt_lock); | ||
215 | orig_eq_count = orig_neigh_node->bcast_own_sum[if_incoming->if_num]; | ||
216 | neigh_rq_count = neigh_node->real_packet_count; | ||
217 | spin_unlock_bh(&orig_node->ogm_cnt_lock); | ||
218 | |||
191 | /* take care not to get a value bigger than 100% */ | 219 | /* take care not to get a value bigger than 100% */ |
192 | total_count = (orig_neigh_node->bcast_own_sum[if_incoming->if_num] > | 220 | total_count = (orig_eq_count > neigh_rq_count ? |
193 | neigh_node->real_packet_count ? | 221 | neigh_rq_count : orig_eq_count); |
194 | neigh_node->real_packet_count : | ||
195 | orig_neigh_node->bcast_own_sum[if_incoming->if_num]); | ||
196 | 222 | ||
197 | /* if we have too few packets (too little data) we set tq_own to zero */ | 223 | /* if we have too few packets (too little data) we set tq_own to zero */ |
198 | /* if we receive too few packets it is not considered bidirectional */ | 224 | /* if we receive too few packets it is not considered bidirectional */ |
199 | if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) || | 225 | if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) || |
200 | (neigh_node->real_packet_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM)) | 226 | (neigh_rq_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM)) |
201 | orig_neigh_node->tq_own = 0; | 227 | tq_own = 0; |
202 | else | 228 | else |
203 | /* neigh_node->real_packet_count is never zero as we | 229 | /* neigh_node->real_packet_count is never zero as we |
204 | * only purge old information when getting new | 230 | * only purge old information when getting new |
205 | * information */ | 231 | * information */ |
206 | orig_neigh_node->tq_own = (TQ_MAX_VALUE * total_count) / | 232 | tq_own = (TQ_MAX_VALUE * total_count) / neigh_rq_count; |
207 | neigh_node->real_packet_count; | ||
208 | 233 | ||
209 | /* | 234 | /* |
210 | * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does | 235 | * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does |
@@ -212,20 +237,16 @@ static int is_bidirectional_neigh(struct orig_node *orig_node, | |||
212 | * punishes asymmetric links more. This will give a value | 237 | * punishes asymmetric links more. This will give a value |
213 | * between 0 and TQ_MAX_VALUE | 238 | * between 0 and TQ_MAX_VALUE |
214 | */ | 239 | */ |
215 | orig_neigh_node->tq_asym_penalty = | 240 | tq_asym_penalty = TQ_MAX_VALUE - (TQ_MAX_VALUE * |
216 | TQ_MAX_VALUE - | 241 | (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) * |
217 | (TQ_MAX_VALUE * | 242 | (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) * |
218 | (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) * | 243 | (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count)) / |
219 | (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) * | 244 | (TQ_LOCAL_WINDOW_SIZE * |
220 | (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count)) / | 245 | TQ_LOCAL_WINDOW_SIZE * |
221 | (TQ_LOCAL_WINDOW_SIZE * | 246 | TQ_LOCAL_WINDOW_SIZE); |
222 | TQ_LOCAL_WINDOW_SIZE * | 247 | |
223 | TQ_LOCAL_WINDOW_SIZE); | 248 | batman_packet->tq = ((batman_packet->tq * tq_own * tq_asym_penalty) / |
224 | 249 | (TQ_MAX_VALUE * TQ_MAX_VALUE)); | |
225 | batman_packet->tq = ((batman_packet->tq * | ||
226 | orig_neigh_node->tq_own * | ||
227 | orig_neigh_node->tq_asym_penalty) / | ||
228 | (TQ_MAX_VALUE * TQ_MAX_VALUE)); | ||
229 | 250 | ||
230 | bat_dbg(DBG_BATMAN, bat_priv, | 251 | bat_dbg(DBG_BATMAN, bat_priv, |
231 | "bidirectional: " | 252 | "bidirectional: " |
@@ -233,34 +254,141 @@ static int is_bidirectional_neigh(struct orig_node *orig_node, | |||
233 | "real recv = %2i, local tq: %3i, asym_penalty: %3i, " | 254 | "real recv = %2i, local tq: %3i, asym_penalty: %3i, " |
234 | "total tq: %3i\n", | 255 | "total tq: %3i\n", |
235 | orig_node->orig, orig_neigh_node->orig, total_count, | 256 | orig_node->orig, orig_neigh_node->orig, total_count, |
236 | neigh_node->real_packet_count, orig_neigh_node->tq_own, | 257 | neigh_rq_count, tq_own, tq_asym_penalty, batman_packet->tq); |
237 | orig_neigh_node->tq_asym_penalty, batman_packet->tq); | ||
238 | 258 | ||
239 | /* if link has the minimum required transmission quality | 259 | /* if link has the minimum required transmission quality |
240 | * consider it bidirectional */ | 260 | * consider it bidirectional */ |
241 | if (batman_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT) | 261 | if (batman_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT) |
242 | return 1; | 262 | ret = 1; |
243 | 263 | ||
244 | return 0; | 264 | out: |
265 | if (neigh_node) | ||
266 | neigh_node_free_ref(neigh_node); | ||
267 | return ret; | ||
268 | } | ||
269 | |||
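
The rewritten is_bidirectional_neigh() computes the TQ terms on local snapshots (orig_eq_count and neigh_rq_count, read under ogm_cnt_lock) instead of caching tq_own and tq_asym_penalty in orig_neigh_node. The arithmetic is unchanged: total_count is the minimum of echoed own OGMs and received neighbor OGMs, and the asymmetry penalty is the cubic 1 - ((1 - x) ** 3) scaled to TQ_MAX_VALUE. A standalone sketch of the penalty, using batman-adv's TQ_MAX_VALUE of 255 and TQ_LOCAL_WINDOW_SIZE of 64:

    #include <stdint.h>
    #include <stdio.h>

    #define TQ_MAX_VALUE 255
    #define TQ_LOCAL_WINDOW_SIZE 64

    /* hypothetical helper mirroring the tq_asym_penalty math above */
    static uint8_t tq_asym_penalty(uint8_t neigh_rq_count)
    {
        uint32_t hole = TQ_LOCAL_WINDOW_SIZE - neigh_rq_count;

        /* 1 - ((1 - x)^3) with x = neigh_rq_count / window size,
         * scaled to TQ_MAX_VALUE */
        return TQ_MAX_VALUE - (TQ_MAX_VALUE * hole * hole * hole) /
               (TQ_LOCAL_WINDOW_SIZE * TQ_LOCAL_WINDOW_SIZE *
                TQ_LOCAL_WINDOW_SIZE);
    }

    int main(void)
    {
        printf("%u\n", (unsigned)tq_asym_penalty(64)); /* lossless link: 255 */
        printf("%u\n", (unsigned)tq_asym_penalty(32)); /* half the OGMs: 224 */
        return 0;
    }

The cubic shape means a link that loses half its OGMs keeps most of its TQ, while one that loses nearly all of them is pushed steeply toward zero.
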
270 | /* caller must hold the neigh_list_lock */ | ||
271 | void bonding_candidate_del(struct orig_node *orig_node, | ||
272 | struct neigh_node *neigh_node) | ||
273 | { | ||
274 | /* this neighbor is not part of our candidate list */ | ||
275 | if (list_empty(&neigh_node->bonding_list)) | ||
276 | goto out; | ||
277 | |||
278 | list_del_rcu(&neigh_node->bonding_list); | ||
279 | INIT_LIST_HEAD(&neigh_node->bonding_list); | ||
280 | neigh_node_free_ref(neigh_node); | ||
281 | atomic_dec(&orig_node->bond_candidates); | ||
282 | |||
283 | out: | ||
284 | return; | ||
285 | } | ||
286 | |||
287 | static void bonding_candidate_add(struct orig_node *orig_node, | ||
288 | struct neigh_node *neigh_node) | ||
289 | { | ||
290 | struct hlist_node *node; | ||
291 | struct neigh_node *tmp_neigh_node; | ||
292 | uint8_t best_tq, interference_candidate = 0; | ||
293 | |||
294 | spin_lock_bh(&orig_node->neigh_list_lock); | ||
295 | |||
296 | /* only consider if it has the same primary address ... */ | ||
297 | if (!compare_eth(orig_node->orig, | ||
298 | neigh_node->orig_node->primary_addr)) | ||
299 | goto candidate_del; | ||
300 | |||
301 | if (!orig_node->router) | ||
302 | goto candidate_del; | ||
303 | |||
304 | best_tq = orig_node->router->tq_avg; | ||
305 | |||
306 | /* ... and is good enough to be considered */ | ||
307 | if (neigh_node->tq_avg < best_tq - BONDING_TQ_THRESHOLD) | ||
308 | goto candidate_del; | ||
309 | |||
310 | /** | ||
311 | * check if we have another candidate with the same mac address or | ||
312 | * interface. If we do, we won't select this candidate because of | ||
313 | * possible interference. | ||
314 | */ | ||
315 | hlist_for_each_entry_rcu(tmp_neigh_node, node, | ||
316 | &orig_node->neigh_list, list) { | ||
317 | |||
318 | if (tmp_neigh_node == neigh_node) | ||
319 | continue; | ||
320 | |||
321 | /* we only care if the other candidate is even | ||
322 | * considered as a candidate. */ | ||
323 | if (list_empty(&tmp_neigh_node->bonding_list)) | ||
324 | continue; | ||
325 | |||
326 | if ((neigh_node->if_incoming == tmp_neigh_node->if_incoming) || | ||
327 | (compare_eth(neigh_node->addr, tmp_neigh_node->addr))) { | ||
328 | interference_candidate = 1; | ||
329 | break; | ||
330 | } | ||
331 | } | ||
332 | |||
333 | /* don't care further if it is an interference candidate */ | ||
334 | if (interference_candidate) | ||
335 | goto candidate_del; | ||
336 | |||
337 | /* this neighbor already is part of our candidate list */ | ||
338 | if (!list_empty(&neigh_node->bonding_list)) | ||
339 | goto out; | ||
340 | |||
341 | if (!atomic_inc_not_zero(&neigh_node->refcount)) | ||
342 | goto out; | ||
343 | |||
344 | list_add_rcu(&neigh_node->bonding_list, &orig_node->bond_list); | ||
345 | atomic_inc(&orig_node->bond_candidates); | ||
346 | goto out; | ||
347 | |||
348 | candidate_del: | ||
349 | bonding_candidate_del(orig_node, neigh_node); | ||
350 | |||
351 | out: | ||
352 | spin_unlock_bh(&orig_node->neigh_list_lock); | ||
353 | return; | ||
354 | } | ||
355 | |||
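
bonding_candidate_add() rejects a neighbor that shares either its incoming interface or its MAC address with an existing candidate, because both imply the same physical path and bonding across it would gain nothing while risking interference. The test itself reduces to something like this (flattened, hypothetical types for illustration):

    #include <stdbool.h>
    #include <string.h>

    #define ETH_ALEN 6

    struct cand {
        unsigned char addr[ETH_ALEN]; /* neighbor MAC */
        const void *if_incoming;      /* opaque interface handle */
    };

    /* true when two candidates would interfere: same incoming
     * interface or same MAC address */
    static bool interferes(const struct cand *a, const struct cand *b)
    {
        return a->if_incoming == b->if_incoming ||
               memcmp(a->addr, b->addr, ETH_ALEN) == 0;
    }
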
356 | /* copy primary address for bonding */ | ||
357 | static void bonding_save_primary(struct orig_node *orig_node, | ||
358 | struct orig_node *orig_neigh_node, | ||
359 | struct batman_packet *batman_packet) | ||
360 | { | ||
361 | if (!(batman_packet->flags & PRIMARIES_FIRST_HOP)) | ||
362 | return; | ||
363 | |||
364 | memcpy(orig_neigh_node->primary_addr, orig_node->orig, ETH_ALEN); | ||
245 | } | 365 | } |
246 | 366 | ||
247 | static void update_orig(struct bat_priv *bat_priv, | 367 | static void update_orig(struct bat_priv *bat_priv, |
248 | struct orig_node *orig_node, | 368 | struct orig_node *orig_node, |
249 | struct ethhdr *ethhdr, | 369 | struct ethhdr *ethhdr, |
250 | struct batman_packet *batman_packet, | 370 | struct batman_packet *batman_packet, |
251 | struct batman_if *if_incoming, | 371 | struct hard_iface *if_incoming, |
252 | unsigned char *hna_buff, int hna_buff_len, | 372 | unsigned char *hna_buff, int hna_buff_len, |
253 | char is_duplicate) | 373 | char is_duplicate) |
254 | { | 374 | { |
255 | struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL; | 375 | struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL; |
376 | struct orig_node *orig_node_tmp; | ||
377 | struct hlist_node *node; | ||
256 | int tmp_hna_buff_len; | 378 | int tmp_hna_buff_len; |
379 | uint8_t bcast_own_sum_orig, bcast_own_sum_neigh; | ||
257 | 380 | ||
258 | bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): " | 381 | bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): " |
259 | "Searching and updating originator entry of received packet\n"); | 382 | "Searching and updating originator entry of received packet\n"); |
260 | 383 | ||
261 | list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) { | 384 | rcu_read_lock(); |
262 | if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) && | 385 | hlist_for_each_entry_rcu(tmp_neigh_node, node, |
263 | (tmp_neigh_node->if_incoming == if_incoming)) { | 386 | &orig_node->neigh_list, list) { |
387 | if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) && | ||
388 | (tmp_neigh_node->if_incoming == if_incoming) && | ||
389 | atomic_inc_not_zero(&tmp_neigh_node->refcount)) { | ||
390 | if (neigh_node) | ||
391 | neigh_node_free_ref(neigh_node); | ||
264 | neigh_node = tmp_neigh_node; | 392 | neigh_node = tmp_neigh_node; |
265 | continue; | 393 | continue; |
266 | } | 394 | } |
@@ -279,16 +407,20 @@ static void update_orig(struct bat_priv *bat_priv, | |||
279 | 407 | ||
280 | orig_tmp = get_orig_node(bat_priv, ethhdr->h_source); | 408 | orig_tmp = get_orig_node(bat_priv, ethhdr->h_source); |
281 | if (!orig_tmp) | 409 | if (!orig_tmp) |
282 | return; | 410 | goto unlock; |
283 | 411 | ||
284 | neigh_node = create_neighbor(orig_node, orig_tmp, | 412 | neigh_node = create_neighbor(orig_node, orig_tmp, |
285 | ethhdr->h_source, if_incoming); | 413 | ethhdr->h_source, if_incoming); |
414 | |||
415 | orig_node_free_ref(orig_tmp); | ||
286 | if (!neigh_node) | 416 | if (!neigh_node) |
287 | return; | 417 | goto unlock; |
288 | } else | 418 | } else |
289 | bat_dbg(DBG_BATMAN, bat_priv, | 419 | bat_dbg(DBG_BATMAN, bat_priv, |
290 | "Updating existing last-hop neighbor of originator\n"); | 420 | "Updating existing last-hop neighbor of originator\n"); |
291 | 421 | ||
422 | rcu_read_unlock(); | ||
423 | |||
292 | orig_node->flags = batman_packet->flags; | 424 | orig_node->flags = batman_packet->flags; |
293 | neigh_node->last_valid = jiffies; | 425 | neigh_node->last_valid = jiffies; |
294 | 426 | ||
@@ -302,6 +434,8 @@ static void update_orig(struct bat_priv *bat_priv, | |||
302 | neigh_node->last_ttl = batman_packet->ttl; | 434 | neigh_node->last_ttl = batman_packet->ttl; |
303 | } | 435 | } |
304 | 436 | ||
437 | bonding_candidate_add(orig_node, neigh_node); | ||
438 | |||
305 | tmp_hna_buff_len = (hna_buff_len > batman_packet->num_hna * ETH_ALEN ? | 439 | tmp_hna_buff_len = (hna_buff_len > batman_packet->num_hna * ETH_ALEN ? |
306 | batman_packet->num_hna * ETH_ALEN : hna_buff_len); | 440 | batman_packet->num_hna * ETH_ALEN : hna_buff_len); |
307 | 441 | ||
@@ -318,10 +452,22 @@ static void update_orig(struct bat_priv *bat_priv, | |||
318 | /* if the TQ is the same and the link is not more symmetric we | 452 | /* if the TQ is the same and the link is not more symmetric we |
319 | * won't consider it either */ | 453 | * won't consider it either */ |
320 | if ((orig_node->router) && | 454 | if ((orig_node->router) && |
321 | ((neigh_node->tq_avg == orig_node->router->tq_avg) && | 455 | (neigh_node->tq_avg == orig_node->router->tq_avg)) { |
322 | (orig_node->router->orig_node->bcast_own_sum[if_incoming->if_num] | 456 | orig_node_tmp = orig_node->router->orig_node; |
323 | >= neigh_node->orig_node->bcast_own_sum[if_incoming->if_num]))) | 457 | spin_lock_bh(&orig_node_tmp->ogm_cnt_lock); |
324 | goto update_hna; | 458 | bcast_own_sum_orig = |
459 | orig_node_tmp->bcast_own_sum[if_incoming->if_num]; | ||
460 | spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock); | ||
461 | |||
462 | orig_node_tmp = neigh_node->orig_node; | ||
463 | spin_lock_bh(&orig_node_tmp->ogm_cnt_lock); | ||
464 | bcast_own_sum_neigh = | ||
465 | orig_node_tmp->bcast_own_sum[if_incoming->if_num]; | ||
466 | spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock); | ||
467 | |||
468 | if (bcast_own_sum_orig >= bcast_own_sum_neigh) | ||
469 | goto update_hna; | ||
470 | } | ||
325 | 471 | ||
326 | update_routes(bat_priv, orig_node, neigh_node, | 472 | update_routes(bat_priv, orig_node, neigh_node, |
327 | hna_buff, tmp_hna_buff_len); | 473 | hna_buff, tmp_hna_buff_len); |
@@ -342,6 +488,14 @@ update_gw: | |||
342 | (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) && | 488 | (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) && |
343 | (atomic_read(&bat_priv->gw_sel_class) > 2)) | 489 | (atomic_read(&bat_priv->gw_sel_class) > 2)) |
344 | gw_check_election(bat_priv, orig_node); | 490 | gw_check_election(bat_priv, orig_node); |
491 | |||
492 | goto out; | ||
493 | |||
494 | unlock: | ||
495 | rcu_read_unlock(); | ||
496 | out: | ||
497 | if (neigh_node) | ||
498 | neigh_node_free_ref(neigh_node); | ||
345 | } | 499 | } |
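
update_orig() now funnels every early exit through the unlock/out label pair that this patch adds to most receive paths: the RCU read section is closed exactly once and the neigh_node reference dropped exactly once, no matter where the function bails out. The shape, as a compilable userspace sketch with a pthread mutex standing in for the RCU read lock (lookup_locked(), obj_put() and the rest are stand-ins, not batman-adv symbols):

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t tbl_lock = PTHREAD_MUTEX_INITIALIZER;

    struct obj { int refs; };

    /* stand-in lookup; caller must hold tbl_lock */
    static struct obj *lookup_locked(void) { return NULL; }

    static void obj_put(struct obj *o) { if (--o->refs == 0) free(o); }

    static int do_work(struct obj *o) { (void)o; return 0; }

    static int example(void)
    {
        struct obj *o = NULL;
        int ret = -1;

        pthread_mutex_lock(&tbl_lock);
        o = lookup_locked();
        if (!o)
            goto unlock;
        o->refs++;              /* reference survives the unlock */
        pthread_mutex_unlock(&tbl_lock);

        ret = do_work(o);
        goto out;

    unlock:
        pthread_mutex_unlock(&tbl_lock);
    out:
        if (o)
            obj_put(o);
        return ret;
    }

The point of the two labels is precisely the asymmetry: unlock is only reachable while the lock is still held, out only after it has been released.
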
346 | 500 | ||
347 | /* checks whether the host restarted and is in the protection time. | 501 | /* checks whether the host restarted and is in the protection time. |
@@ -379,34 +533,38 @@ static int window_protected(struct bat_priv *bat_priv, | |||
379 | */ | 533 | */ |
380 | static char count_real_packets(struct ethhdr *ethhdr, | 534 | static char count_real_packets(struct ethhdr *ethhdr, |
381 | struct batman_packet *batman_packet, | 535 | struct batman_packet *batman_packet, |
382 | struct batman_if *if_incoming) | 536 | struct hard_iface *if_incoming) |
383 | { | 537 | { |
384 | struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); | 538 | struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); |
385 | struct orig_node *orig_node; | 539 | struct orig_node *orig_node; |
386 | struct neigh_node *tmp_neigh_node; | 540 | struct neigh_node *tmp_neigh_node; |
541 | struct hlist_node *node; | ||
387 | char is_duplicate = 0; | 542 | char is_duplicate = 0; |
388 | int32_t seq_diff; | 543 | int32_t seq_diff; |
389 | int need_update = 0; | 544 | int need_update = 0; |
390 | int set_mark; | 545 | int set_mark, ret = -1; |
391 | 546 | ||
392 | orig_node = get_orig_node(bat_priv, batman_packet->orig); | 547 | orig_node = get_orig_node(bat_priv, batman_packet->orig); |
393 | if (!orig_node) | 548 | if (!orig_node) |
394 | return 0; | 549 | return 0; |
395 | 550 | ||
551 | spin_lock_bh(&orig_node->ogm_cnt_lock); | ||
396 | seq_diff = batman_packet->seqno - orig_node->last_real_seqno; | 552 | seq_diff = batman_packet->seqno - orig_node->last_real_seqno; |
397 | 553 | ||
398 | /* signal to the caller that the packet is to be dropped. */ | 554 | /* signal to the caller that the packet is to be dropped. */ |
399 | if (window_protected(bat_priv, seq_diff, | 555 | if (window_protected(bat_priv, seq_diff, |
400 | &orig_node->batman_seqno_reset)) | 556 | &orig_node->batman_seqno_reset)) |
401 | return -1; | 557 | goto out; |
402 | 558 | ||
403 | list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) { | 559 | rcu_read_lock(); |
560 | hlist_for_each_entry_rcu(tmp_neigh_node, node, | ||
561 | &orig_node->neigh_list, list) { | ||
404 | 562 | ||
405 | is_duplicate |= get_bit_status(tmp_neigh_node->real_bits, | 563 | is_duplicate |= get_bit_status(tmp_neigh_node->real_bits, |
406 | orig_node->last_real_seqno, | 564 | orig_node->last_real_seqno, |
407 | batman_packet->seqno); | 565 | batman_packet->seqno); |
408 | 566 | ||
409 | if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) && | 567 | if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) && |
410 | (tmp_neigh_node->if_incoming == if_incoming)) | 568 | (tmp_neigh_node->if_incoming == if_incoming)) |
411 | set_mark = 1; | 569 | set_mark = 1; |
412 | else | 570 | else |
@@ -420,6 +578,7 @@ static char count_real_packets(struct ethhdr *ethhdr, | |||
420 | tmp_neigh_node->real_packet_count = | 578 | tmp_neigh_node->real_packet_count = |
421 | bit_packet_count(tmp_neigh_node->real_bits); | 579 | bit_packet_count(tmp_neigh_node->real_bits); |
422 | } | 580 | } |
581 | rcu_read_unlock(); | ||
423 | 582 | ||
424 | if (need_update) { | 583 | if (need_update) { |
425 | bat_dbg(DBG_BATMAN, bat_priv, | 584 | bat_dbg(DBG_BATMAN, bat_priv, |
@@ -428,121 +587,21 @@ static char count_real_packets(struct ethhdr *ethhdr, | |||
428 | orig_node->last_real_seqno = batman_packet->seqno; | 587 | orig_node->last_real_seqno = batman_packet->seqno; |
429 | } | 588 | } |
430 | 589 | ||
431 | return is_duplicate; | 590 | ret = is_duplicate; |
432 | } | ||
433 | |||
434 | /* copy primary address for bonding */ | ||
435 | static void mark_bonding_address(struct orig_node *orig_node, | ||
436 | struct orig_node *orig_neigh_node, | ||
437 | struct batman_packet *batman_packet) | ||
438 | |||
439 | { | ||
440 | if (batman_packet->flags & PRIMARIES_FIRST_HOP) | ||
441 | memcpy(orig_neigh_node->primary_addr, | ||
442 | orig_node->orig, ETH_ALEN); | ||
443 | |||
444 | return; | ||
445 | } | ||
446 | |||
447 | /* mark possible bond.candidates in the neighbor list */ | ||
448 | void update_bonding_candidates(struct orig_node *orig_node) | ||
449 | { | ||
450 | int candidates; | ||
451 | int interference_candidate; | ||
452 | int best_tq; | ||
453 | struct neigh_node *tmp_neigh_node, *tmp_neigh_node2; | ||
454 | struct neigh_node *first_candidate, *last_candidate; | ||
455 | |||
456 | /* update the candidates for this originator */ | ||
457 | if (!orig_node->router) { | ||
458 | orig_node->bond.candidates = 0; | ||
459 | return; | ||
460 | } | ||
461 | |||
462 | best_tq = orig_node->router->tq_avg; | ||
463 | |||
464 | /* update bond.candidates */ | ||
465 | |||
466 | candidates = 0; | ||
467 | |||
468 | /* mark other nodes which also received "PRIMARIES FIRST HOP" packets | ||
469 | * as "bonding partner" */ | ||
470 | |||
471 | /* first, zero the list */ | ||
472 | list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) { | ||
473 | tmp_neigh_node->next_bond_candidate = NULL; | ||
474 | } | ||
475 | |||
476 | first_candidate = NULL; | ||
477 | last_candidate = NULL; | ||
478 | list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) { | ||
479 | |||
480 | /* only consider if it has the same primary address ... */ | ||
481 | if (memcmp(orig_node->orig, | ||
482 | tmp_neigh_node->orig_node->primary_addr, | ||
483 | ETH_ALEN) != 0) | ||
484 | continue; | ||
485 | |||
486 | /* ... and is good enough to be considered */ | ||
487 | if (tmp_neigh_node->tq_avg < best_tq - BONDING_TQ_THRESHOLD) | ||
488 | continue; | ||
489 | |||
490 | /* check if we have another candidate with the same | ||
491 | * mac address or interface. If we do, we won't | ||
492 | * select this candidate because of possible interference. */ | ||
493 | |||
494 | interference_candidate = 0; | ||
495 | list_for_each_entry(tmp_neigh_node2, | ||
496 | &orig_node->neigh_list, list) { | ||
497 | |||
498 | if (tmp_neigh_node2 == tmp_neigh_node) | ||
499 | continue; | ||
500 | |||
501 | /* we only care if the other candidate is even | ||
502 | * considered as a candidate. */ | ||
503 | if (!tmp_neigh_node2->next_bond_candidate) | ||
504 | continue; | ||
505 | |||
506 | |||
507 | if ((tmp_neigh_node->if_incoming == | ||
508 | tmp_neigh_node2->if_incoming) | ||
509 | || (memcmp(tmp_neigh_node->addr, | ||
510 | tmp_neigh_node2->addr, ETH_ALEN) == 0)) { | ||
511 | |||
512 | interference_candidate = 1; | ||
513 | break; | ||
514 | } | ||
515 | } | ||
516 | /* don't care further if it is an interference candidate */ | ||
517 | if (interference_candidate) | ||
518 | continue; | ||
519 | |||
520 | if (!first_candidate) { | ||
521 | first_candidate = tmp_neigh_node; | ||
522 | tmp_neigh_node->next_bond_candidate = first_candidate; | ||
523 | } else | ||
524 | tmp_neigh_node->next_bond_candidate = last_candidate; | ||
525 | |||
526 | last_candidate = tmp_neigh_node; | ||
527 | |||
528 | candidates++; | ||
529 | } | ||
530 | |||
531 | if (candidates > 0) { | ||
532 | first_candidate->next_bond_candidate = last_candidate; | ||
533 | orig_node->bond.selected = first_candidate; | ||
534 | } | ||
535 | 591 | ||
536 | orig_node->bond.candidates = candidates; | 592 | out: |
593 | spin_unlock_bh(&orig_node->ogm_cnt_lock); | ||
594 | orig_node_free_ref(orig_node); | ||
595 | return ret; | ||
537 | } | 596 | } |
538 | 597 | ||
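
count_real_packets() detects duplicates by testing each neighbor's sliding real_bits window against the incoming seqno, now under ogm_cnt_lock so the seqno bookkeeping cannot race with other receivers. A toy model of the window test (the real code keeps a multi-word bitfield and uses get_bit_status(); WINDOW_SIZE mirrors TQ_LOCAL_WINDOW_SIZE):

    #include <stdbool.h>
    #include <stdint.h>

    #define WINDOW_SIZE 64

    /* bit i set means "seqno (last_seqno - i) was seen" */
    struct seq_window {
        uint64_t bits;
        uint32_t last_seqno;
    };

    /* true if seqno falls inside the window and was already marked,
     * i.e. this packet is a duplicate */
    static bool window_test(const struct seq_window *w, uint32_t seqno)
    {
        int32_t diff = (int32_t)(w->last_seqno - seqno);

        if (diff < 0 || diff >= WINDOW_SIZE)
            return false;
        return (w->bits >> diff) & 1;
    }
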
539 | void receive_bat_packet(struct ethhdr *ethhdr, | 598 | void receive_bat_packet(struct ethhdr *ethhdr, |
540 | struct batman_packet *batman_packet, | 599 | struct batman_packet *batman_packet, |
541 | unsigned char *hna_buff, int hna_buff_len, | 600 | unsigned char *hna_buff, int hna_buff_len, |
542 | struct batman_if *if_incoming) | 601 | struct hard_iface *if_incoming) |
543 | { | 602 | { |
544 | struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); | 603 | struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); |
545 | struct batman_if *batman_if; | 604 | struct hard_iface *hard_iface; |
546 | struct orig_node *orig_neigh_node, *orig_node; | 605 | struct orig_node *orig_neigh_node, *orig_node; |
547 | char has_directlink_flag; | 606 | char has_directlink_flag; |
548 | char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0; | 607 | char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0; |
@@ -570,8 +629,8 @@ void receive_bat_packet(struct ethhdr *ethhdr, | |||
570 | 629 | ||
571 | has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0); | 630 | has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0); |
572 | 631 | ||
573 | is_single_hop_neigh = (compare_orig(ethhdr->h_source, | 632 | is_single_hop_neigh = (compare_eth(ethhdr->h_source, |
574 | batman_packet->orig) ? 1 : 0); | 633 | batman_packet->orig) ? 1 : 0); |
575 | 634 | ||
576 | bat_dbg(DBG_BATMAN, bat_priv, | 635 | bat_dbg(DBG_BATMAN, bat_priv, |
577 | "Received BATMAN packet via NB: %pM, IF: %s [%pM] " | 636 | "Received BATMAN packet via NB: %pM, IF: %s [%pM] " |
@@ -584,26 +643,26 @@ void receive_bat_packet(struct ethhdr *ethhdr, | |||
584 | has_directlink_flag); | 643 | has_directlink_flag); |
585 | 644 | ||
586 | rcu_read_lock(); | 645 | rcu_read_lock(); |
587 | list_for_each_entry_rcu(batman_if, &if_list, list) { | 646 | list_for_each_entry_rcu(hard_iface, &hardif_list, list) { |
588 | if (batman_if->if_status != IF_ACTIVE) | 647 | if (hard_iface->if_status != IF_ACTIVE) |
589 | continue; | 648 | continue; |
590 | 649 | ||
591 | if (batman_if->soft_iface != if_incoming->soft_iface) | 650 | if (hard_iface->soft_iface != if_incoming->soft_iface) |
592 | continue; | 651 | continue; |
593 | 652 | ||
594 | if (compare_orig(ethhdr->h_source, | 653 | if (compare_eth(ethhdr->h_source, |
595 | batman_if->net_dev->dev_addr)) | 654 | hard_iface->net_dev->dev_addr)) |
596 | is_my_addr = 1; | 655 | is_my_addr = 1; |
597 | 656 | ||
598 | if (compare_orig(batman_packet->orig, | 657 | if (compare_eth(batman_packet->orig, |
599 | batman_if->net_dev->dev_addr)) | 658 | hard_iface->net_dev->dev_addr)) |
600 | is_my_orig = 1; | 659 | is_my_orig = 1; |
601 | 660 | ||
602 | if (compare_orig(batman_packet->prev_sender, | 661 | if (compare_eth(batman_packet->prev_sender, |
603 | batman_if->net_dev->dev_addr)) | 662 | hard_iface->net_dev->dev_addr)) |
604 | is_my_oldorig = 1; | 663 | is_my_oldorig = 1; |
605 | 664 | ||
606 | if (compare_orig(ethhdr->h_source, broadcast_addr)) | 665 | if (compare_eth(ethhdr->h_source, broadcast_addr)) |
607 | is_broadcast = 1; | 666 | is_broadcast = 1; |
608 | } | 667 | } |
609 | rcu_read_unlock(); | 668 | rcu_read_unlock(); |
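
This loop is also where the series' compare_orig() to compare_eth() rename is most visible. The helper is nothing more than a byte-for-byte Ethernet address comparison, roughly:

    #include <stdbool.h>
    #include <string.h>

    #define ETH_ALEN 6

    /* compare_eth() as used throughout these hunks */
    static bool compare_eth(const void *data1, const void *data2)
    {
        return memcmp(data1, data2, ETH_ALEN) == 0;
    }

The rename reflects that the function compares plain MAC addresses, not originator objects.
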
@@ -635,7 +694,6 @@ void receive_bat_packet(struct ethhdr *ethhdr, | |||
635 | int offset; | 694 | int offset; |
636 | 695 | ||
637 | orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source); | 696 | orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source); |
638 | |||
639 | if (!orig_neigh_node) | 697 | if (!orig_neigh_node) |
640 | return; | 698 | return; |
641 | 699 | ||
@@ -644,18 +702,22 @@ void receive_bat_packet(struct ethhdr *ethhdr, | |||
644 | /* if received seqno equals last send seqno save new | 702 | /* if received seqno equals last send seqno save new |
645 | * seqno for bidirectional check */ | 703 | * seqno for bidirectional check */ |
646 | if (has_directlink_flag && | 704 | if (has_directlink_flag && |
647 | compare_orig(if_incoming->net_dev->dev_addr, | 705 | compare_eth(if_incoming->net_dev->dev_addr, |
648 | batman_packet->orig) && | 706 | batman_packet->orig) && |
649 | (batman_packet->seqno - if_incoming_seqno + 2 == 0)) { | 707 | (batman_packet->seqno - if_incoming_seqno + 2 == 0)) { |
650 | offset = if_incoming->if_num * NUM_WORDS; | 708 | offset = if_incoming->if_num * NUM_WORDS; |
709 | |||
710 | spin_lock_bh(&orig_neigh_node->ogm_cnt_lock); | ||
651 | word = &(orig_neigh_node->bcast_own[offset]); | 711 | word = &(orig_neigh_node->bcast_own[offset]); |
652 | bit_mark(word, 0); | 712 | bit_mark(word, 0); |
653 | orig_neigh_node->bcast_own_sum[if_incoming->if_num] = | 713 | orig_neigh_node->bcast_own_sum[if_incoming->if_num] = |
654 | bit_packet_count(word); | 714 | bit_packet_count(word); |
715 | spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock); | ||
655 | } | 716 | } |
656 | 717 | ||
657 | bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: " | 718 | bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: " |
658 | "originator packet from myself (via neighbor)\n"); | 719 | "originator packet from myself (via neighbor)\n"); |
720 | orig_node_free_ref(orig_neigh_node); | ||
659 | return; | 721 | return; |
660 | } | 722 | } |
661 | 723 | ||
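
When a node hears its own OGM echoed back by a direct neighbor, it marks the echo in bcast_own, which this hunk now protects with ogm_cnt_lock. The guard (batman_packet->seqno - if_incoming_seqno + 2 == 0) is wraparound-safe unsigned arithmetic; assuming, per the surrounding comment, that the interface counter runs two ahead of the OGM now returning, it is equivalent to:

    #include <stdbool.h>
    #include <stdint.h>

    /* wraparound-safe test for "this echoed OGM carries my last
     * sent seqno", assuming the interface counter is two ahead */
    static bool is_my_last_seqno(uint32_t seqno, uint32_t if_seqno)
    {
        return (uint32_t)(seqno - if_seqno + 2) == 0;
    }
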
@@ -676,27 +738,27 @@ void receive_bat_packet(struct ethhdr *ethhdr, | |||
676 | bat_dbg(DBG_BATMAN, bat_priv, | 738 | bat_dbg(DBG_BATMAN, bat_priv, |
677 | "Drop packet: packet within seqno protection time " | 739 | "Drop packet: packet within seqno protection time " |
678 | "(sender: %pM)\n", ethhdr->h_source); | 740 | "(sender: %pM)\n", ethhdr->h_source); |
679 | return; | 741 | goto out; |
680 | } | 742 | } |
681 | 743 | ||
682 | if (batman_packet->tq == 0) { | 744 | if (batman_packet->tq == 0) { |
683 | bat_dbg(DBG_BATMAN, bat_priv, | 745 | bat_dbg(DBG_BATMAN, bat_priv, |
684 | "Drop packet: originator packet with tq equal 0\n"); | 746 | "Drop packet: originator packet with tq equal 0\n"); |
685 | return; | 747 | goto out; |
686 | } | 748 | } |
687 | 749 | ||
688 | /* avoid temporary routing loops */ | 750 | /* avoid temporary routing loops */ |
689 | if ((orig_node->router) && | 751 | if ((orig_node->router) && |
690 | (orig_node->router->orig_node->router) && | 752 | (orig_node->router->orig_node->router) && |
691 | (compare_orig(orig_node->router->addr, | 753 | (compare_eth(orig_node->router->addr, |
692 | batman_packet->prev_sender)) && | 754 | batman_packet->prev_sender)) && |
693 | !(compare_orig(batman_packet->orig, batman_packet->prev_sender)) && | 755 | !(compare_eth(batman_packet->orig, batman_packet->prev_sender)) && |
694 | (compare_orig(orig_node->router->addr, | 756 | (compare_eth(orig_node->router->addr, |
695 | orig_node->router->orig_node->router->addr))) { | 757 | orig_node->router->orig_node->router->addr))) { |
696 | bat_dbg(DBG_BATMAN, bat_priv, | 758 | bat_dbg(DBG_BATMAN, bat_priv, |
697 | "Drop packet: ignoring all rebroadcast packets that " | 759 | "Drop packet: ignoring all rebroadcast packets that " |
698 | "may make me loop (sender: %pM)\n", ethhdr->h_source); | 760 | "may make me loop (sender: %pM)\n", ethhdr->h_source); |
699 | return; | 761 | goto out; |
700 | } | 762 | } |
701 | 763 | ||
702 | /* if sender is a direct neighbor the sender mac equals | 764 | /* if sender is a direct neighbor the sender mac equals |
@@ -705,19 +767,21 @@ void receive_bat_packet(struct ethhdr *ethhdr, | |||
705 | orig_node : | 767 | orig_node : |
706 | get_orig_node(bat_priv, ethhdr->h_source)); | 768 | get_orig_node(bat_priv, ethhdr->h_source)); |
707 | if (!orig_neigh_node) | 769 | if (!orig_neigh_node) |
708 | return; | 770 | goto out; |
709 | 771 | ||
710 | /* drop packet if sender is not a direct neighbor and if we | 772 | /* drop packet if sender is not a direct neighbor and if we |
711 | * don't route towards it */ | 773 | * don't route towards it */ |
712 | if (!is_single_hop_neigh && (!orig_neigh_node->router)) { | 774 | if (!is_single_hop_neigh && (!orig_neigh_node->router)) { |
713 | bat_dbg(DBG_BATMAN, bat_priv, | 775 | bat_dbg(DBG_BATMAN, bat_priv, |
714 | "Drop packet: OGM via unknown neighbor!\n"); | 776 | "Drop packet: OGM via unknown neighbor!\n"); |
715 | return; | 777 | goto out_neigh; |
716 | } | 778 | } |
717 | 779 | ||
718 | is_bidirectional = is_bidirectional_neigh(orig_node, orig_neigh_node, | 780 | is_bidirectional = is_bidirectional_neigh(orig_node, orig_neigh_node, |
719 | batman_packet, if_incoming); | 781 | batman_packet, if_incoming); |
720 | 782 | ||
783 | bonding_save_primary(orig_node, orig_neigh_node, batman_packet); | ||
784 | |||
721 | /* update ranking if it is not a duplicate or has the same | 785 | /* update ranking if it is not a duplicate or has the same |
722 | * seqno and similar ttl as the non-duplicate */ | 786 | * seqno and similar ttl as the non-duplicate */ |
723 | if (is_bidirectional && | 787 | if (is_bidirectional && |
@@ -727,9 +791,6 @@ void receive_bat_packet(struct ethhdr *ethhdr, | |||
727 | update_orig(bat_priv, orig_node, ethhdr, batman_packet, | 791 | update_orig(bat_priv, orig_node, ethhdr, batman_packet, |
728 | if_incoming, hna_buff, hna_buff_len, is_duplicate); | 792 | if_incoming, hna_buff, hna_buff_len, is_duplicate); |
729 | 793 | ||
730 | mark_bonding_address(orig_node, orig_neigh_node, batman_packet); | ||
731 | update_bonding_candidates(orig_node); | ||
732 | |||
733 | /* is single hop (direct) neighbor */ | 794 | /* is single hop (direct) neighbor */ |
734 | if (is_single_hop_neigh) { | 795 | if (is_single_hop_neigh) { |
735 | 796 | ||
@@ -739,31 +800,36 @@ void receive_bat_packet(struct ethhdr *ethhdr, | |||
739 | 800 | ||
740 | bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: " | 801 | bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: " |
741 | "rebroadcast neighbor packet with direct link flag\n"); | 802 | "rebroadcast neighbor packet with direct link flag\n"); |
742 | return; | 803 | goto out_neigh; |
743 | } | 804 | } |
744 | 805 | ||
745 | /* multihop originator */ | 806 | /* multihop originator */ |
746 | if (!is_bidirectional) { | 807 | if (!is_bidirectional) { |
747 | bat_dbg(DBG_BATMAN, bat_priv, | 808 | bat_dbg(DBG_BATMAN, bat_priv, |
748 | "Drop packet: not received via bidirectional link\n"); | 809 | "Drop packet: not received via bidirectional link\n"); |
749 | return; | 810 | goto out_neigh; |
750 | } | 811 | } |
751 | 812 | ||
752 | if (is_duplicate) { | 813 | if (is_duplicate) { |
753 | bat_dbg(DBG_BATMAN, bat_priv, | 814 | bat_dbg(DBG_BATMAN, bat_priv, |
754 | "Drop packet: duplicate packet received\n"); | 815 | "Drop packet: duplicate packet received\n"); |
755 | return; | 816 | goto out_neigh; |
756 | } | 817 | } |
757 | 818 | ||
758 | bat_dbg(DBG_BATMAN, bat_priv, | 819 | bat_dbg(DBG_BATMAN, bat_priv, |
759 | "Forwarding packet: rebroadcast originator packet\n"); | 820 | "Forwarding packet: rebroadcast originator packet\n"); |
760 | schedule_forward_packet(orig_node, ethhdr, batman_packet, | 821 | schedule_forward_packet(orig_node, ethhdr, batman_packet, |
761 | 0, hna_buff_len, if_incoming); | 822 | 0, hna_buff_len, if_incoming); |
823 | |||
824 | out_neigh: | ||
825 | if ((orig_neigh_node) && (!is_single_hop_neigh)) | ||
826 | orig_node_free_ref(orig_neigh_node); | ||
827 | out: | ||
828 | orig_node_free_ref(orig_node); | ||
762 | } | 829 | } |
763 | 830 | ||
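
The bcast_own_sum value consumed by the TQ math earlier is simply the population count of this bcast_own window; bit_packet_count() boils down to counting set bits. Sketched over a single 64-bit word (the real window spans NUM_WORDS words):

    #include <stdint.h>

    /* count set bits by repeatedly clearing the lowest one */
    static int popcount64(uint64_t w)
    {
        int n = 0;

        while (w) {
            w &= w - 1;
            n++;
        }
        return n;
    }
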
764 | int recv_bat_packet(struct sk_buff *skb, struct batman_if *batman_if) | 831 | int recv_bat_packet(struct sk_buff *skb, struct hard_iface *hard_iface) |
765 | { | 832 | { |
766 | struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface); | ||
767 | struct ethhdr *ethhdr; | 833 | struct ethhdr *ethhdr; |
768 | 834 | ||
769 | /* drop the packet if it lacks the necessary minimum size */ | 835 | /* drop the packet if it lacks the necessary minimum size */ |
@@ -790,12 +856,10 @@ int recv_bat_packet(struct sk_buff *skb, struct batman_if *batman_if) | |||
790 | 856 | ||
791 | ethhdr = (struct ethhdr *)skb_mac_header(skb); | 857 | ethhdr = (struct ethhdr *)skb_mac_header(skb); |
792 | 858 | ||
793 | spin_lock_bh(&bat_priv->orig_hash_lock); | ||
794 | receive_aggr_bat_packet(ethhdr, | 859 | receive_aggr_bat_packet(ethhdr, |
795 | skb->data, | 860 | skb->data, |
796 | skb_headlen(skb), | 861 | skb_headlen(skb), |
797 | batman_if); | 862 | hard_iface); |
798 | spin_unlock_bh(&bat_priv->orig_hash_lock); | ||
799 | 863 | ||
800 | kfree_skb(skb); | 864 | kfree_skb(skb); |
801 | return NET_RX_SUCCESS; | 865 | return NET_RX_SUCCESS; |
@@ -804,68 +868,75 @@ int recv_bat_packet(struct sk_buff *skb, struct batman_if *batman_if) | |||
804 | static int recv_my_icmp_packet(struct bat_priv *bat_priv, | 868 | static int recv_my_icmp_packet(struct bat_priv *bat_priv, |
805 | struct sk_buff *skb, size_t icmp_len) | 869 | struct sk_buff *skb, size_t icmp_len) |
806 | { | 870 | { |
807 | struct orig_node *orig_node; | 871 | struct orig_node *orig_node = NULL; |
872 | struct neigh_node *neigh_node = NULL; | ||
808 | struct icmp_packet_rr *icmp_packet; | 873 | struct icmp_packet_rr *icmp_packet; |
809 | struct batman_if *batman_if; | 874 | int ret = NET_RX_DROP; |
810 | int ret; | ||
811 | uint8_t dstaddr[ETH_ALEN]; | ||
812 | 875 | ||
813 | icmp_packet = (struct icmp_packet_rr *)skb->data; | 876 | icmp_packet = (struct icmp_packet_rr *)skb->data; |
814 | 877 | ||
815 | /* add data to device queue */ | 878 | /* add data to device queue */ |
816 | if (icmp_packet->msg_type != ECHO_REQUEST) { | 879 | if (icmp_packet->msg_type != ECHO_REQUEST) { |
817 | bat_socket_receive_packet(icmp_packet, icmp_len); | 880 | bat_socket_receive_packet(icmp_packet, icmp_len); |
818 | return NET_RX_DROP; | 881 | goto out; |
819 | } | 882 | } |
820 | 883 | ||
821 | if (!bat_priv->primary_if) | 884 | if (!bat_priv->primary_if) |
822 | return NET_RX_DROP; | 885 | goto out; |
823 | 886 | ||
824 | /* answer echo request (ping) */ | 887 | /* answer echo request (ping) */ |
825 | /* get routing information */ | 888 | /* get routing information */ |
826 | spin_lock_bh(&bat_priv->orig_hash_lock); | 889 | rcu_read_lock(); |
827 | orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash, | 890 | orig_node = orig_hash_find(bat_priv, icmp_packet->orig); |
828 | compare_orig, choose_orig, | 891 | |
829 | icmp_packet->orig)); | 892 | if (!orig_node) |
830 | ret = NET_RX_DROP; | 893 | goto unlock; |
831 | |||
832 | if ((orig_node) && (orig_node->router)) { | ||
833 | |||
834 | /* don't lock while sending the packets ... we therefore | ||
835 | * copy the required data before sending */ | ||
836 | batman_if = orig_node->router->if_incoming; | ||
837 | memcpy(dstaddr, orig_node->router->addr, ETH_ALEN); | ||
838 | spin_unlock_bh(&bat_priv->orig_hash_lock); | ||
839 | |||
840 | /* create a copy of the skb, if needed, to modify it. */ | ||
841 | if (skb_cow(skb, sizeof(struct ethhdr)) < 0) | ||
842 | return NET_RX_DROP; | ||
843 | 894 | ||
844 | icmp_packet = (struct icmp_packet_rr *)skb->data; | 895 | neigh_node = orig_node->router; |
845 | 896 | ||
846 | memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); | 897 | if (!neigh_node) |
847 | memcpy(icmp_packet->orig, | 898 | goto unlock; |
848 | bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN); | ||
849 | icmp_packet->msg_type = ECHO_REPLY; | ||
850 | icmp_packet->ttl = TTL; | ||
851 | 899 | ||
852 | send_skb_packet(skb, batman_if, dstaddr); | 900 | if (!atomic_inc_not_zero(&neigh_node->refcount)) { |
853 | ret = NET_RX_SUCCESS; | 901 | neigh_node = NULL; |
902 | goto unlock; | ||
903 | } | ||
854 | 904 | ||
855 | } else | 905 | rcu_read_unlock(); |
856 | spin_unlock_bh(&bat_priv->orig_hash_lock); | 906 | |
907 | /* create a copy of the skb, if needed, to modify it. */ | ||
908 | if (skb_cow(skb, sizeof(struct ethhdr)) < 0) | ||
909 | goto out; | ||
910 | |||
911 | icmp_packet = (struct icmp_packet_rr *)skb->data; | ||
857 | 912 | ||
913 | memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); | ||
914 | memcpy(icmp_packet->orig, | ||
915 | bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN); | ||
916 | icmp_packet->msg_type = ECHO_REPLY; | ||
917 | icmp_packet->ttl = TTL; | ||
918 | |||
919 | send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); | ||
920 | ret = NET_RX_SUCCESS; | ||
921 | goto out; | ||
922 | |||
923 | unlock: | ||
924 | rcu_read_unlock(); | ||
925 | out: | ||
926 | if (neigh_node) | ||
927 | neigh_node_free_ref(neigh_node); | ||
928 | if (orig_node) | ||
929 | orig_node_free_ref(orig_node); | ||
858 | return ret; | 930 | return ret; |
859 | } | 931 | } |
860 | 932 | ||
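
The rewritten recv_my_icmp_packet() still answers a ping the same way: the requester's address moves into the destination field, our primary address becomes the new origin, and type and TTL are reset before the reply is sent back through the router's incoming interface. In isolation (field layout and the ECHO_REPLY/TTL values here are assumptions for illustration; the real definitions live in batman-adv's packet.h and main.h):

    #include <stdint.h>
    #include <string.h>

    #define ETH_ALEN 6
    #define ECHO_REPLY 0 /* assumed value */
    #define TTL 50       /* assumed value */

    struct icmp_pkt {
        uint8_t msg_type;
        uint8_t ttl;
        uint8_t dst[ETH_ALEN];
        uint8_t orig[ETH_ALEN];
    };

    /* turn a received echo request into the echo reply above */
    static void make_echo_reply(struct icmp_pkt *p,
                                const uint8_t *my_addr)
    {
        memcpy(p->dst, p->orig, ETH_ALEN);
        memcpy(p->orig, my_addr, ETH_ALEN);
        p->msg_type = ECHO_REPLY;
        p->ttl = TTL;
    }
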
861 | static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv, | 933 | static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv, |
862 | struct sk_buff *skb) | 934 | struct sk_buff *skb) |
863 | { | 935 | { |
864 | struct orig_node *orig_node; | 936 | struct orig_node *orig_node = NULL; |
937 | struct neigh_node *neigh_node = NULL; | ||
865 | struct icmp_packet *icmp_packet; | 938 | struct icmp_packet *icmp_packet; |
866 | struct batman_if *batman_if; | 939 | int ret = NET_RX_DROP; |
867 | int ret; | ||
868 | uint8_t dstaddr[ETH_ALEN]; | ||
869 | 940 | ||
870 | icmp_packet = (struct icmp_packet *)skb->data; | 941 | icmp_packet = (struct icmp_packet *)skb->data; |
871 | 942 | ||
@@ -874,59 +945,67 @@ static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv, | |||
874 | pr_debug("Warning - can't forward icmp packet from %pM to " | 945 | pr_debug("Warning - can't forward icmp packet from %pM to " |
875 | "%pM: ttl exceeded\n", icmp_packet->orig, | 946 | "%pM: ttl exceeded\n", icmp_packet->orig, |
876 | icmp_packet->dst); | 947 | icmp_packet->dst); |
877 | return NET_RX_DROP; | 948 | goto out; |
878 | } | 949 | } |
879 | 950 | ||
880 | if (!bat_priv->primary_if) | 951 | if (!bat_priv->primary_if) |
881 | return NET_RX_DROP; | 952 | goto out; |
882 | 953 | ||
883 | /* get routing information */ | 954 | /* get routing information */ |
884 | spin_lock_bh(&bat_priv->orig_hash_lock); | 955 | rcu_read_lock(); |
885 | orig_node = ((struct orig_node *) | 956 | orig_node = orig_hash_find(bat_priv, icmp_packet->orig); |
886 | hash_find(bat_priv->orig_hash, compare_orig, choose_orig, | ||
887 | icmp_packet->orig)); | ||
888 | ret = NET_RX_DROP; | ||
889 | |||
890 | if ((orig_node) && (orig_node->router)) { | ||
891 | |||
892 | /* don't lock while sending the packets ... we therefore | ||
893 | * copy the required data before sending */ | ||
894 | batman_if = orig_node->router->if_incoming; | ||
895 | memcpy(dstaddr, orig_node->router->addr, ETH_ALEN); | ||
896 | spin_unlock_bh(&bat_priv->orig_hash_lock); | ||
897 | |||
898 | /* create a copy of the skb, if needed, to modify it. */ | ||
899 | if (skb_cow(skb, sizeof(struct ethhdr)) < 0) | ||
900 | return NET_RX_DROP; | ||
901 | 957 | ||
902 | icmp_packet = (struct icmp_packet *) skb->data; | 958 | if (!orig_node) |
959 | goto unlock; | ||
903 | 960 | ||
904 | memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); | 961 | neigh_node = orig_node->router; |
905 | memcpy(icmp_packet->orig, | ||
906 | bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN); | ||
907 | icmp_packet->msg_type = TTL_EXCEEDED; | ||
908 | icmp_packet->ttl = TTL; | ||
909 | 962 | ||
910 | send_skb_packet(skb, batman_if, dstaddr); | 963 | if (!neigh_node) |
911 | ret = NET_RX_SUCCESS; | 964 | goto unlock; |
912 | 965 | ||
913 | } else | 966 | if (!atomic_inc_not_zero(&neigh_node->refcount)) { |
914 | spin_unlock_bh(&bat_priv->orig_hash_lock); | 967 | neigh_node = NULL; |
968 | goto unlock; | ||
969 | } | ||
970 | |||
971 | rcu_read_unlock(); | ||
972 | |||
973 | /* create a copy of the skb, if needed, to modify it. */ | ||
974 | if (skb_cow(skb, sizeof(struct ethhdr)) < 0) | ||
975 | goto out; | ||
915 | 976 | ||
977 | icmp_packet = (struct icmp_packet *)skb->data; | ||
978 | |||
979 | memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); | ||
980 | memcpy(icmp_packet->orig, | ||
981 | bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN); | ||
982 | icmp_packet->msg_type = TTL_EXCEEDED; | ||
983 | icmp_packet->ttl = TTL; | ||
984 | |||
985 | send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); | ||
986 | ret = NET_RX_SUCCESS; | ||
987 | goto out; | ||
988 | |||
989 | unlock: | ||
990 | rcu_read_unlock(); | ||
991 | out: | ||
992 | if (neigh_node) | ||
993 | neigh_node_free_ref(neigh_node); | ||
994 | if (orig_node) | ||
995 | orig_node_free_ref(orig_node); | ||
916 | return ret; | 996 | return ret; |
917 | } | 997 | } |
918 | 998 | ||
919 | 999 | ||
920 | int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if) | 1000 | int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if) |
921 | { | 1001 | { |
922 | struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); | 1002 | struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); |
923 | struct icmp_packet_rr *icmp_packet; | 1003 | struct icmp_packet_rr *icmp_packet; |
924 | struct ethhdr *ethhdr; | 1004 | struct ethhdr *ethhdr; |
925 | struct orig_node *orig_node; | 1005 | struct orig_node *orig_node = NULL; |
926 | struct batman_if *batman_if; | 1006 | struct neigh_node *neigh_node = NULL; |
927 | int hdr_size = sizeof(struct icmp_packet); | 1007 | int hdr_size = sizeof(struct icmp_packet); |
928 | int ret; | 1008 | int ret = NET_RX_DROP; |
929 | uint8_t dstaddr[ETH_ALEN]; | ||
930 | 1009 | ||
931 | /** | 1010 | /** |
932 | * we truncate all incoming icmp packets if they don't match our size | 1011 | * we truncate all incoming icmp packets if they don't match our size |
@@ -936,21 +1015,21 @@ int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if) | |||
936 | 1015 | ||
937 | /* drop the packet if it lacks the necessary minimum size */ | 1016 | /* drop the packet if it lacks the necessary minimum size */ |
938 | if (unlikely(!pskb_may_pull(skb, hdr_size))) | 1017 | if (unlikely(!pskb_may_pull(skb, hdr_size))) |
939 | return NET_RX_DROP; | 1018 | goto out; |
940 | 1019 | ||
941 | ethhdr = (struct ethhdr *)skb_mac_header(skb); | 1020 | ethhdr = (struct ethhdr *)skb_mac_header(skb); |
942 | 1021 | ||
943 | /* packet with unicast indication but broadcast recipient */ | 1022 | /* packet with unicast indication but broadcast recipient */ |
944 | if (is_broadcast_ether_addr(ethhdr->h_dest)) | 1023 | if (is_broadcast_ether_addr(ethhdr->h_dest)) |
945 | return NET_RX_DROP; | 1024 | goto out; |
946 | 1025 | ||
947 | /* packet with broadcast sender address */ | 1026 | /* packet with broadcast sender address */ |
948 | if (is_broadcast_ether_addr(ethhdr->h_source)) | 1027 | if (is_broadcast_ether_addr(ethhdr->h_source)) |
949 | return NET_RX_DROP; | 1028 | goto out; |
950 | 1029 | ||
951 | /* not for me */ | 1030 | /* not for me */ |
952 | if (!is_my_mac(ethhdr->h_dest)) | 1031 | if (!is_my_mac(ethhdr->h_dest)) |
953 | return NET_RX_DROP; | 1032 | goto out; |
954 | 1033 | ||
955 | icmp_packet = (struct icmp_packet_rr *)skb->data; | 1034 | icmp_packet = (struct icmp_packet_rr *)skb->data; |
956 | 1035 | ||
@@ -970,50 +1049,59 @@ int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if) | |||
970 | if (icmp_packet->ttl < 2) | 1049 | if (icmp_packet->ttl < 2) |
971 | return recv_icmp_ttl_exceeded(bat_priv, skb); | 1050 | return recv_icmp_ttl_exceeded(bat_priv, skb); |
972 | 1051 | ||
973 | ret = NET_RX_DROP; | ||
974 | |||
975 | /* get routing information */ | 1052 | /* get routing information */ |
976 | spin_lock_bh(&bat_priv->orig_hash_lock); | 1053 | rcu_read_lock(); |
977 | orig_node = ((struct orig_node *) | 1054 | orig_node = orig_hash_find(bat_priv, icmp_packet->dst); |
978 | hash_find(bat_priv->orig_hash, compare_orig, choose_orig, | ||
979 | icmp_packet->dst)); | ||
980 | 1055 | ||
981 | if ((orig_node) && (orig_node->router)) { | 1056 | if (!orig_node) |
1057 | goto unlock; | ||
982 | 1058 | ||
983 | /* don't lock while sending the packets ... we therefore | 1059 | neigh_node = orig_node->router; |
984 | * copy the required data before sending */ | ||
985 | batman_if = orig_node->router->if_incoming; | ||
986 | memcpy(dstaddr, orig_node->router->addr, ETH_ALEN); | ||
987 | spin_unlock_bh(&bat_priv->orig_hash_lock); | ||
988 | 1060 | ||
989 | /* create a copy of the skb, if needed, to modify it. */ | 1061 | if (!neigh_node) |
990 | if (skb_cow(skb, sizeof(struct ethhdr)) < 0) | 1062 | goto unlock; |
991 | return NET_RX_DROP; | ||
992 | 1063 | ||
993 | icmp_packet = (struct icmp_packet_rr *)skb->data; | 1064 | if (!atomic_inc_not_zero(&neigh_node->refcount)) { |
1065 | neigh_node = NULL; | ||
1066 | goto unlock; | ||
1067 | } | ||
994 | 1068 | ||
995 | /* decrement ttl */ | 1069 | rcu_read_unlock(); |
996 | icmp_packet->ttl--; | ||
997 | 1070 | ||
998 | /* route it */ | 1071 | /* create a copy of the skb, if needed, to modify it. */ |
999 | send_skb_packet(skb, batman_if, dstaddr); | 1072 | if (skb_cow(skb, sizeof(struct ethhdr)) < 0) |
1000 | ret = NET_RX_SUCCESS; | 1073 | goto out; |
1001 | 1074 | ||
1002 | } else | 1075 | icmp_packet = (struct icmp_packet_rr *)skb->data; |
1003 | spin_unlock_bh(&bat_priv->orig_hash_lock); | 1076 | |
1077 | /* decrement ttl */ | ||
1078 | icmp_packet->ttl--; | ||
1079 | |||
1080 | /* route it */ | ||
1081 | send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); | ||
1082 | ret = NET_RX_SUCCESS; | ||
1083 | goto out; | ||
1004 | 1084 | ||
1085 | unlock: | ||
1086 | rcu_read_unlock(); | ||
1087 | out: | ||
1088 | if (neigh_node) | ||
1089 | neigh_node_free_ref(neigh_node); | ||
1090 | if (orig_node) | ||
1091 | orig_node_free_ref(orig_node); | ||
1005 | return ret; | 1092 | return ret; |
1006 | } | 1093 | } |
1007 | 1094 | ||
1008 | /* find a suitable router for this originator, and use | 1095 | /* find a suitable router for this originator, and use |
1009 | * bonding if possible. */ | 1096 | * bonding if possible. increases the found neighbor's |
1097 | * refcount. */ | ||
1010 | struct neigh_node *find_router(struct bat_priv *bat_priv, | 1098 | struct neigh_node *find_router(struct bat_priv *bat_priv, |
1011 | struct orig_node *orig_node, | 1099 | struct orig_node *orig_node, |
1012 | struct batman_if *recv_if) | 1100 | struct hard_iface *recv_if) |
1013 | { | 1101 | { |
1014 | struct orig_node *primary_orig_node; | 1102 | struct orig_node *primary_orig_node; |
1015 | struct orig_node *router_orig; | 1103 | struct orig_node *router_orig; |
1016 | struct neigh_node *router, *first_candidate, *best_router; | 1104 | struct neigh_node *router, *first_candidate, *tmp_neigh_node; |
1017 | static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0}; | 1105 | static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0}; |
1018 | int bonding_enabled; | 1106 | int bonding_enabled; |
1019 | 1107 | ||
@@ -1025,78 +1113,128 @@ struct neigh_node *find_router(struct bat_priv *bat_priv, | |||
1025 | 1113 | ||
1026 | /* without bonding, the first node should | 1114 | /* without bonding, the first node should |
1027 | * always choose the default router. */ | 1115 | * always choose the default router. */ |
1028 | |||
1029 | bonding_enabled = atomic_read(&bat_priv->bonding); | 1116 | bonding_enabled = atomic_read(&bat_priv->bonding); |
1030 | 1117 | ||
1031 | if ((!recv_if) && (!bonding_enabled)) | 1118 | rcu_read_lock(); |
1032 | return orig_node->router; | 1119 | /* select default router to output */ |
1033 | 1120 | router = orig_node->router; | |
1034 | router_orig = orig_node->router->orig_node; | 1121 | router_orig = orig_node->router->orig_node; |
1122 | if (!router_orig || !atomic_inc_not_zero(&router->refcount)) { | ||
1123 | rcu_read_unlock(); | ||
1124 | return NULL; | ||
1125 | } | ||
1126 | |||
1127 | if ((!recv_if) && (!bonding_enabled)) | ||
1128 | goto return_router; | ||
1035 | 1129 | ||
1036 | /* if we have something in the primary_addr, we can search | 1130 | /* if we have something in the primary_addr, we can search |
1037 | * for a potential bonding candidate. */ | 1131 | * for a potential bonding candidate. */ |
1038 | if (memcmp(router_orig->primary_addr, zero_mac, ETH_ALEN) == 0) | 1132 | if (compare_eth(router_orig->primary_addr, zero_mac)) |
1039 | return orig_node->router; | 1133 | goto return_router; |
1040 | 1134 | ||
1041 | /* find the orig_node which has the primary interface. might | 1135 | /* find the orig_node which has the primary interface. might |
1042 | * even be the same as our router_orig in many cases */ | 1136 | * even be the same as our router_orig in many cases */ |
1043 | 1137 | ||
1044 | if (memcmp(router_orig->primary_addr, | 1138 | if (compare_eth(router_orig->primary_addr, router_orig->orig)) { |
1045 | router_orig->orig, ETH_ALEN) == 0) { | ||
1046 | primary_orig_node = router_orig; | 1139 | primary_orig_node = router_orig; |
1047 | } else { | 1140 | } else { |
1048 | primary_orig_node = hash_find(bat_priv->orig_hash, compare_orig, | 1141 | primary_orig_node = orig_hash_find(bat_priv, |
1049 | choose_orig, | 1142 | router_orig->primary_addr); |
1050 | router_orig->primary_addr); | ||
1051 | |||
1052 | if (!primary_orig_node) | 1143 | if (!primary_orig_node) |
1053 | return orig_node->router; | 1144 | goto return_router; |
1145 | |||
1146 | orig_node_free_ref(primary_orig_node); | ||
1054 | } | 1147 | } |
1055 | 1148 | ||
1056 | /* with less than 2 candidates, we can't do any | 1149 | /* with less than 2 candidates, we can't do any |
1057 | * bonding and prefer the original router. */ | 1150 | * bonding and prefer the original router. */ |
1058 | 1151 | if (atomic_read(&primary_orig_node->bond_candidates) < 2) | |
1059 | if (primary_orig_node->bond.candidates < 2) | 1152 | goto return_router; |
1060 | return orig_node->router; | ||
1061 | 1153 | ||
1062 | 1154 | ||
1063 | /* all nodes between should choose a candidate which | 1155 | /* all nodes between should choose a candidate which |
1064 | * is not on the interface where the packet came | 1156 | * is not on the interface where the packet came |
1065 | * in. */ | 1157 | * in. */ |
1066 | first_candidate = primary_orig_node->bond.selected; | 1158 | |
1067 | router = first_candidate; | 1159 | neigh_node_free_ref(router); |
1160 | first_candidate = NULL; | ||
1161 | router = NULL; | ||
1068 | 1162 | ||
1069 | if (bonding_enabled) { | 1163 | if (bonding_enabled) { |
1070 | /* in the bonding case, send the packets in a round | 1164 | /* in the bonding case, send the packets in a round |
1071 | * robin fashion over the remaining interfaces. */ | 1165 | * robin fashion over the remaining interfaces. */ |
1072 | do { | 1166 | |
1167 | list_for_each_entry_rcu(tmp_neigh_node, | ||
1168 | &primary_orig_node->bond_list, bonding_list) { | ||
1169 | if (!first_candidate) | ||
1170 | first_candidate = tmp_neigh_node; | ||
1073 | /* recv_if == NULL on the first node. */ | 1171 | /* recv_if == NULL on the first node. */ |
1074 | if (router->if_incoming != recv_if) | 1172 | if (tmp_neigh_node->if_incoming != recv_if && |
1173 | atomic_inc_not_zero(&tmp_neigh_node->refcount)) { | ||
1174 | router = tmp_neigh_node; | ||
1075 | break; | 1175 | break; |
1176 | } | ||
1177 | } | ||
1076 | 1178 | ||
1077 | router = router->next_bond_candidate; | 1179 | /* use the first candidate if nothing was found. */ |
1078 | } while (router != first_candidate); | 1180 | if (!router && first_candidate && |
1181 | atomic_inc_not_zero(&first_candidate->refcount)) | ||
1182 | router = first_candidate; | ||
1079 | 1183 | ||
1080 | primary_orig_node->bond.selected = router->next_bond_candidate; | 1184 | if (!router) { |
1185 | rcu_read_unlock(); | ||
1186 | return NULL; | ||
1187 | } | ||
1188 | |||
1189 | /* selected should point to the next element | ||
1190 | * after the current router */ | ||
1191 | spin_lock_bh(&primary_orig_node->neigh_list_lock); | ||
1192 | /* this is a list_move(), which unfortunately | ||
1193 | * does not exist as rcu version */ | ||
1194 | list_del_rcu(&primary_orig_node->bond_list); | ||
1195 | list_add_rcu(&primary_orig_node->bond_list, | ||
1196 | &router->bonding_list); | ||
1197 | spin_unlock_bh(&primary_orig_node->neigh_list_lock); | ||
1081 | 1198 | ||
1082 | } else { | 1199 | } else { |
1083 | /* if bonding is disabled, use the best of the | 1200 | /* if bonding is disabled, use the best of the |
1084 | * remaining candidates which are not using | 1201 | * remaining candidates which are not using |
1085 | * this interface. */ | 1202 | * this interface. */ |
1086 | best_router = first_candidate; | 1203 | list_for_each_entry_rcu(tmp_neigh_node, |
1204 | &primary_orig_node->bond_list, bonding_list) { | ||
1205 | if (!first_candidate) | ||
1206 | first_candidate = tmp_neigh_node; | ||
1087 | 1207 | ||
1088 | do { | ||
1089 | /* recv_if == NULL on the first node. */ | 1208 | /* recv_if == NULL on the first node. */ |
1090 | if ((router->if_incoming != recv_if) && | 1209 | if (tmp_neigh_node->if_incoming == recv_if) |
1091 | (router->tq_avg > best_router->tq_avg)) | 1210 | continue; |
1092 | best_router = router; | ||
1093 | 1211 | ||
1094 | router = router->next_bond_candidate; | 1212 | if (!atomic_inc_not_zero(&tmp_neigh_node->refcount)) |
1095 | } while (router != first_candidate); | 1213 | continue; |
1096 | 1214 | ||
1097 | router = best_router; | 1215 | /* if we don't have a router yet |
1098 | } | 1216 | * or this one is better, choose it. */ |
1217 | if ((!router) || | ||
1218 | (tmp_neigh_node->tq_avg > router->tq_avg)) { | ||
1219 | /* decrement refcount of | ||
1220 | * previously selected router */ | ||
1221 | if (router) | ||
1222 | neigh_node_free_ref(router); | ||
1223 | |||
1224 | router = tmp_neigh_node; | ||
1225 | atomic_inc_not_zero(&router->refcount); | ||
1226 | } | ||
1227 | |||
1228 | neigh_node_free_ref(tmp_neigh_node); | ||
1229 | } | ||
1099 | 1230 | ||
1231 | /* use the first candidate if nothing was found. */ | ||
1232 | if (!router && first_candidate && | ||
1233 | atomic_inc_not_zero(&first_candidate->refcount)) | ||
1234 | router = first_candidate; | ||
1235 | } | ||
1236 | return_router: | ||
1237 | rcu_read_unlock(); | ||
1100 | return router; | 1238 | return router; |
1101 | } | 1239 | } |
1102 | 1240 | ||
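
find_router()'s bonding branch keeps its round-robin semantics, but because RCU lists have no list_move(), the rotation is done as list_del_rcu() plus list_add_rcu() under neigh_list_lock, as the comment in the hunk notes. The selection policy itself (take the next candidate that did not come in on the receiving interface, else fall back to the first) can be modelled on a plain ring (toy types, not the patch's):

    #include <stddef.h>

    struct cand {
        struct cand *next;        /* circular list */
        const void *if_incoming;  /* opaque interface handle */
    };

    /* pick the next candidate not on recv_if, starting after *pos,
     * and advance *pos so the next call continues the rotation */
    static struct cand *round_robin_pick(struct cand **pos,
                                         const void *recv_if)
    {
        struct cand *start = *pos, *c = start;

        do {
            c = c->next;
            if (c->if_incoming != recv_if) {
                *pos = c;
                return c;
            }
        } while (c != start);

        return NULL; /* every candidate is on recv_if */
    }

The non-bonding branch instead scans all candidates and keeps the one with the best tq_avg off the receiving interface, taking and dropping a refcount per visited node.
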
@@ -1125,17 +1263,14 @@ static int check_unicast_packet(struct sk_buff *skb, int hdr_size) | |||
1125 | return 0; | 1263 | return 0; |
1126 | } | 1264 | } |
1127 | 1265 | ||
1128 | int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if, | 1266 | int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if) |
1129 | int hdr_size) | ||
1130 | { | 1267 | { |
1131 | struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); | 1268 | struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); |
1132 | struct orig_node *orig_node; | 1269 | struct orig_node *orig_node = NULL; |
1133 | struct neigh_node *router; | 1270 | struct neigh_node *neigh_node = NULL; |
1134 | struct batman_if *batman_if; | ||
1135 | uint8_t dstaddr[ETH_ALEN]; | ||
1136 | struct unicast_packet *unicast_packet; | 1271 | struct unicast_packet *unicast_packet; |
1137 | struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb); | 1272 | struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb); |
1138 | int ret; | 1273 | int ret = NET_RX_DROP; |
1139 | struct sk_buff *new_skb; | 1274 | struct sk_buff *new_skb; |
1140 | 1275 | ||
1141 | unicast_packet = (struct unicast_packet *)skb->data; | 1276 | unicast_packet = (struct unicast_packet *)skb->data; |
@@ -1145,53 +1280,51 @@ int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if, | |||
1145 | pr_debug("Warning - can't forward unicast packet from %pM to " | 1280 | pr_debug("Warning - can't forward unicast packet from %pM to " |
1146 | "%pM: ttl exceeded\n", ethhdr->h_source, | 1281 | "%pM: ttl exceeded\n", ethhdr->h_source, |
1147 | unicast_packet->dest); | 1282 | unicast_packet->dest); |
1148 | return NET_RX_DROP; | 1283 | goto out; |
1149 | } | 1284 | } |
1150 | 1285 | ||
1151 | /* get routing information */ | 1286 | /* get routing information */ |
1152 | spin_lock_bh(&bat_priv->orig_hash_lock); | 1287 | rcu_read_lock(); |
1153 | orig_node = ((struct orig_node *) | 1288 | orig_node = orig_hash_find(bat_priv, unicast_packet->dest); |
1154 | hash_find(bat_priv->orig_hash, compare_orig, choose_orig, | ||
1155 | unicast_packet->dest)); | ||
1156 | |||
1157 | router = find_router(bat_priv, orig_node, recv_if); | ||
1158 | 1289 | ||
1159 | if (!router) { | 1290 | if (!orig_node) |
1160 | spin_unlock_bh(&bat_priv->orig_hash_lock); | 1291 | goto unlock; |
1161 | return NET_RX_DROP; | ||
1162 | } | ||
1163 | 1292 | ||
1164 | /* don't lock while sending the packets ... we therefore | 1293 | rcu_read_unlock(); |
1165 | * copy the required data before sending */ | ||
1166 | 1294 | ||
1167 | batman_if = router->if_incoming; | 1295 | /* find_router() increases the neigh_node's refcount if found. */ |
1168 | memcpy(dstaddr, router->addr, ETH_ALEN); | 1296 | neigh_node = find_router(bat_priv, orig_node, recv_if); |
1169 | 1297 | ||
1170 | spin_unlock_bh(&bat_priv->orig_hash_lock); | 1298 | if (!neigh_node) |
1299 | goto out; | ||
1171 | 1300 | ||
1172 | /* create a copy of the skb, if needed, to modify it. */ | 1301 | /* create a copy of the skb, if needed, to modify it. */ |
1173 | if (skb_cow(skb, sizeof(struct ethhdr)) < 0) | 1302 | if (skb_cow(skb, sizeof(struct ethhdr)) < 0) |
1174 | return NET_RX_DROP; | 1303 | goto out; |
1175 | 1304 | ||
1176 | unicast_packet = (struct unicast_packet *)skb->data; | 1305 | unicast_packet = (struct unicast_packet *)skb->data; |
1177 | 1306 | ||
1178 | if (unicast_packet->packet_type == BAT_UNICAST && | 1307 | if (unicast_packet->packet_type == BAT_UNICAST && |
1179 | atomic_read(&bat_priv->fragmentation) && | 1308 | atomic_read(&bat_priv->fragmentation) && |
1180 | skb->len > batman_if->net_dev->mtu) | 1309 | skb->len > neigh_node->if_incoming->net_dev->mtu) { |
1181 | return frag_send_skb(skb, bat_priv, batman_if, | 1310 | ret = frag_send_skb(skb, bat_priv, |
1182 | dstaddr); | 1311 | neigh_node->if_incoming, neigh_node->addr); |
1312 | goto out; | ||
1313 | } | ||
1183 | 1314 | ||
1184 | if (unicast_packet->packet_type == BAT_UNICAST_FRAG && | 1315 | if (unicast_packet->packet_type == BAT_UNICAST_FRAG && |
1185 | frag_can_reassemble(skb, batman_if->net_dev->mtu)) { | 1316 | frag_can_reassemble(skb, neigh_node->if_incoming->net_dev->mtu)) { |
1186 | 1317 | ||
1187 | ret = frag_reassemble_skb(skb, bat_priv, &new_skb); | 1318 | ret = frag_reassemble_skb(skb, bat_priv, &new_skb); |
1188 | 1319 | ||
1189 | if (ret == NET_RX_DROP) | 1320 | if (ret == NET_RX_DROP) |
1190 | return NET_RX_DROP; | 1321 | goto out; |
1191 | 1322 | ||
1192 | /* packet was buffered for late merge */ | 1323 | /* packet was buffered for late merge */ |
1193 | if (!new_skb) | 1324 | if (!new_skb) { |
1194 | return NET_RX_SUCCESS; | 1325 | ret = NET_RX_SUCCESS; |
1326 | goto out; | ||
1327 | } | ||
1195 | 1328 | ||
1196 | skb = new_skb; | 1329 | skb = new_skb; |
1197 | unicast_packet = (struct unicast_packet *)skb->data; | 1330 | unicast_packet = (struct unicast_packet *)skb->data; |
@@ -1201,12 +1334,21 @@ int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if, | |||
1201 | unicast_packet->ttl--; | 1334 | unicast_packet->ttl--; |
1202 | 1335 | ||
1203 | /* route it */ | 1336 | /* route it */ |
1204 | send_skb_packet(skb, batman_if, dstaddr); | 1337 | send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); |
1338 | ret = NET_RX_SUCCESS; | ||
1339 | goto out; | ||
1205 | 1340 | ||
1206 | return NET_RX_SUCCESS; | 1341 | unlock: |
1342 | rcu_read_unlock(); | ||
1343 | out: | ||
1344 | if (neigh_node) | ||
1345 | neigh_node_free_ref(neigh_node); | ||
1346 | if (orig_node) | ||
1347 | orig_node_free_ref(orig_node); | ||
1348 | return ret; | ||
1207 | } | 1349 | } |
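All exits of the rewritten function now funnel through the unlock/out labels, so every reference taken along the way is released exactly once regardless of where it bails out. The same single-exit shape in a self-contained userspace sketch (type and helpers invented for illustration):

	#include <stdlib.h>

	struct obj { int refs; };

	static struct obj *obj_lookup(void)
	{
		struct obj *o = calloc(1, sizeof(*o));

		if (o)
			o->refs = 1;	/* caller owns one reference */
		return o;
	}

	static void obj_put(struct obj *o)
	{
		if (o && --o->refs == 0)
			free(o);
	}

	/* ret defaults to the failure case; success paths overwrite it;
	 * the out: label undoes whatever was acquired, in reverse order. */
	static int handle(void)
	{
		struct obj *a = NULL, *b = NULL;
		int ret = -1;

		a = obj_lookup();
		if (!a)
			goto out;

		b = obj_lookup();
		if (!b)
			goto out;

		ret = 0;		/* the actual work would go here */
	out:
		obj_put(b);
		obj_put(a);
		return ret;
	}

	int main(void) { return handle() ? 1 : 0; }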
1208 | 1350 | ||
1209 | int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if) | 1351 | int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if) |
1210 | { | 1352 | { |
1211 | struct unicast_packet *unicast_packet; | 1353 | struct unicast_packet *unicast_packet; |
1212 | int hdr_size = sizeof(struct unicast_packet); | 1354 | int hdr_size = sizeof(struct unicast_packet); |
@@ -1222,10 +1364,10 @@ int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if) | |||
1222 | return NET_RX_SUCCESS; | 1364 | return NET_RX_SUCCESS; |
1223 | } | 1365 | } |
1224 | 1366 | ||
1225 | return route_unicast_packet(skb, recv_if, hdr_size); | 1367 | return route_unicast_packet(skb, recv_if); |
1226 | } | 1368 | } |
1227 | 1369 | ||
1228 | int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if) | 1370 | int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if) |
1229 | { | 1371 | { |
1230 | struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); | 1372 | struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); |
1231 | struct unicast_frag_packet *unicast_packet; | 1373 | struct unicast_frag_packet *unicast_packet; |
@@ -1255,89 +1397,96 @@ int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if) | |||
1255 | return NET_RX_SUCCESS; | 1397 | return NET_RX_SUCCESS; |
1256 | } | 1398 | } |
1257 | 1399 | ||
1258 | return route_unicast_packet(skb, recv_if, hdr_size); | 1400 | return route_unicast_packet(skb, recv_if); |
1259 | } | 1401 | } |
1260 | 1402 | ||
1261 | 1403 | ||
1262 | int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if) | 1404 | int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if) |
1263 | { | 1405 | { |
1264 | struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); | 1406 | struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); |
1265 | struct orig_node *orig_node; | 1407 | struct orig_node *orig_node = NULL; |
1266 | struct bcast_packet *bcast_packet; | 1408 | struct bcast_packet *bcast_packet; |
1267 | struct ethhdr *ethhdr; | 1409 | struct ethhdr *ethhdr; |
1268 | int hdr_size = sizeof(struct bcast_packet); | 1410 | int hdr_size = sizeof(struct bcast_packet); |
1411 | int ret = NET_RX_DROP; | ||
1269 | int32_t seq_diff; | 1412 | int32_t seq_diff; |
1270 | 1413 | ||
1271 | /* drop packet if it does not have the necessary minimum size */ | 1414 | /* drop packet if it does not have the necessary minimum size */ |
1272 | if (unlikely(!pskb_may_pull(skb, hdr_size))) | 1415 | if (unlikely(!pskb_may_pull(skb, hdr_size))) |
1273 | return NET_RX_DROP; | 1416 | goto out; |
1274 | 1417 | ||
1275 | ethhdr = (struct ethhdr *)skb_mac_header(skb); | 1418 | ethhdr = (struct ethhdr *)skb_mac_header(skb); |
1276 | 1419 | ||
1277 | /* packet with broadcast indication but unicast recipient */ | 1420 | /* packet with broadcast indication but unicast recipient */ |
1278 | if (!is_broadcast_ether_addr(ethhdr->h_dest)) | 1421 | if (!is_broadcast_ether_addr(ethhdr->h_dest)) |
1279 | return NET_RX_DROP; | 1422 | goto out; |
1280 | 1423 | ||
1281 | /* packet with broadcast sender address */ | 1424 | /* packet with broadcast sender address */ |
1282 | if (is_broadcast_ether_addr(ethhdr->h_source)) | 1425 | if (is_broadcast_ether_addr(ethhdr->h_source)) |
1283 | return NET_RX_DROP; | 1426 | goto out; |
1284 | 1427 | ||
1285 | /* ignore broadcasts sent by myself */ | 1428 | /* ignore broadcasts sent by myself */ |
1286 | if (is_my_mac(ethhdr->h_source)) | 1429 | if (is_my_mac(ethhdr->h_source)) |
1287 | return NET_RX_DROP; | 1430 | goto out; |
1288 | 1431 | ||
1289 | bcast_packet = (struct bcast_packet *)skb->data; | 1432 | bcast_packet = (struct bcast_packet *)skb->data; |
1290 | 1433 | ||
1291 | /* ignore broadcasts originated by myself */ | 1434 | /* ignore broadcasts originated by myself */ |
1292 | if (is_my_mac(bcast_packet->orig)) | 1435 | if (is_my_mac(bcast_packet->orig)) |
1293 | return NET_RX_DROP; | 1436 | goto out; |
1294 | 1437 | ||
1295 | if (bcast_packet->ttl < 2) | 1438 | if (bcast_packet->ttl < 2) |
1296 | return NET_RX_DROP; | 1439 | goto out; |
1297 | 1440 | ||
1298 | spin_lock_bh(&bat_priv->orig_hash_lock); | 1441 | rcu_read_lock(); |
1299 | orig_node = ((struct orig_node *) | 1442 | orig_node = orig_hash_find(bat_priv, bcast_packet->orig); |
1300 | hash_find(bat_priv->orig_hash, compare_orig, choose_orig, | ||
1301 | bcast_packet->orig)); | ||
1302 | 1443 | ||
1303 | if (!orig_node) { | 1444 | if (!orig_node) |
1304 | spin_unlock_bh(&bat_priv->orig_hash_lock); | 1445 | goto rcu_unlock; |
1305 | return NET_RX_DROP; | 1446 | |
1306 | } | 1447 | rcu_read_unlock(); |
1448 | |||
1449 | spin_lock_bh(&orig_node->bcast_seqno_lock); | ||
1307 | 1450 | ||
1308 | /* check whether the packet is a duplicate */ | 1451 | /* check whether the packet is a duplicate */ |
1309 | if (get_bit_status(orig_node->bcast_bits, | 1452 | if (get_bit_status(orig_node->bcast_bits, orig_node->last_bcast_seqno, |
1310 | orig_node->last_bcast_seqno, | 1453 | ntohl(bcast_packet->seqno))) |
1311 | ntohl(bcast_packet->seqno))) { | 1454 | goto spin_unlock; |
1312 | spin_unlock_bh(&bat_priv->orig_hash_lock); | ||
1313 | return NET_RX_DROP; | ||
1314 | } | ||
1315 | 1455 | ||
1316 | seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno; | 1456 | seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno; |
1317 | 1457 | ||
1318 | /* check whether the packet is old and the host just restarted. */ | 1458 | /* check whether the packet is old and the host just restarted. */ |
1319 | if (window_protected(bat_priv, seq_diff, | 1459 | if (window_protected(bat_priv, seq_diff, |
1320 | &orig_node->bcast_seqno_reset)) { | 1460 | &orig_node->bcast_seqno_reset)) |
1321 | spin_unlock_bh(&bat_priv->orig_hash_lock); | 1461 | goto spin_unlock; |
1322 | return NET_RX_DROP; | ||
1323 | } | ||
1324 | 1462 | ||
1325 | /* mark broadcast in flood history, update window position | 1463 | /* mark broadcast in flood history, update window position |
1326 | * if required. */ | 1464 | * if required. */ |
1327 | if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1)) | 1465 | if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1)) |
1328 | orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno); | 1466 | orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno); |
1329 | 1467 | ||
1330 | spin_unlock_bh(&bat_priv->orig_hash_lock); | 1468 | spin_unlock_bh(&orig_node->bcast_seqno_lock); |
1469 | |||
1331 | /* rebroadcast packet */ | 1470 | /* rebroadcast packet */ |
1332 | add_bcast_packet_to_list(bat_priv, skb); | 1471 | add_bcast_packet_to_list(bat_priv, skb); |
1333 | 1472 | ||
1334 | /* broadcast for me */ | 1473 | /* broadcast for me */ |
1335 | interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size); | 1474 | interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size); |
1475 | ret = NET_RX_SUCCESS; | ||
1476 | goto out; | ||
1336 | 1477 | ||
1337 | return NET_RX_SUCCESS; | 1478 | rcu_unlock: |
1479 | rcu_read_unlock(); | ||
1480 | goto out; | ||
1481 | spin_unlock: | ||
1482 | spin_unlock_bh(&orig_node->bcast_seqno_lock); | ||
1483 | out: | ||
1484 | if (orig_node) | ||
1485 | orig_node_free_ref(orig_node); | ||
1486 | return ret; | ||
1338 | } | 1487 | } |
1339 | 1488 | ||
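The duplicate test that bcast_seqno_lock now serialises per originator is a sliding-window membership check over recent sequence numbers. A self-contained simplification using a single 64-bit word (the real get_bit_status()/bit_get_packet() helpers additionally handle window resets and the protection window_protected() enforces):

	#include <stdbool.h>
	#include <stdint.h>

	#define WINDOW_SIZE 64

	struct seqno_window {
		uint64_t bits;	/* bit i set => seqno (last - i) was seen */
		uint32_t last;	/* newest sequence number seen so far */
	};

	/* Return true for a duplicate or hopelessly old seqno; otherwise
	 * record it, sliding the window forward when seqno advances. */
	static bool seqno_seen(struct seqno_window *w, uint32_t seqno)
	{
		int32_t diff = (int32_t)(seqno - w->last);

		if (diff <= -WINDOW_SIZE)
			return true;		/* fell off the window */

		if (diff <= 0) {		/* inside the window */
			uint64_t mask = 1ULL << -diff;

			if (w->bits & mask)
				return true;
			w->bits |= mask;
			return false;
		}

		/* newer than anything seen: shift history, mark bit 0 */
		w->bits = diff >= WINDOW_SIZE ? 0 : w->bits << diff;
		w->bits |= 1;
		w->last = seqno;
		return false;
	}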
1340 | int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if) | 1489 | int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if) |
1341 | { | 1490 | { |
1342 | struct vis_packet *vis_packet; | 1491 | struct vis_packet *vis_packet; |
1343 | struct ethhdr *ethhdr; | 1492 | struct ethhdr *ethhdr; |
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h index a09d16f0c3ab..b5a064c88a4f 100644 --- a/net/batman-adv/routing.h +++ b/net/batman-adv/routing.h | |||
@@ -22,24 +22,25 @@ | |||
22 | #ifndef _NET_BATMAN_ADV_ROUTING_H_ | 22 | #ifndef _NET_BATMAN_ADV_ROUTING_H_ |
23 | #define _NET_BATMAN_ADV_ROUTING_H_ | 23 | #define _NET_BATMAN_ADV_ROUTING_H_ |
24 | 24 | ||
25 | void slide_own_bcast_window(struct batman_if *batman_if); | 25 | void slide_own_bcast_window(struct hard_iface *hard_iface); |
26 | void receive_bat_packet(struct ethhdr *ethhdr, | 26 | void receive_bat_packet(struct ethhdr *ethhdr, |
27 | struct batman_packet *batman_packet, | 27 | struct batman_packet *batman_packet, |
28 | unsigned char *hna_buff, int hna_buff_len, | 28 | unsigned char *hna_buff, int hna_buff_len, |
29 | struct batman_if *if_incoming); | 29 | struct hard_iface *if_incoming); |
30 | void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node, | 30 | void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node, |
31 | struct neigh_node *neigh_node, unsigned char *hna_buff, | 31 | struct neigh_node *neigh_node, unsigned char *hna_buff, |
32 | int hna_buff_len); | 32 | int hna_buff_len); |
33 | int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if, | 33 | int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if); |
34 | int hdr_size); | 34 | int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if); |
35 | int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if); | 35 | int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if); |
36 | int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if); | 36 | int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if); |
37 | int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if); | 37 | int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if); |
38 | int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if); | 38 | int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if); |
39 | int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if); | 39 | int recv_bat_packet(struct sk_buff *skb, struct hard_iface *recv_if); |
40 | int recv_bat_packet(struct sk_buff *skb, struct batman_if *recv_if); | ||
41 | struct neigh_node *find_router(struct bat_priv *bat_priv, | 40 | struct neigh_node *find_router(struct bat_priv *bat_priv, |
42 | struct orig_node *orig_node, struct batman_if *recv_if); | 41 | struct orig_node *orig_node, |
43 | void update_bonding_candidates(struct orig_node *orig_node); | 42 | struct hard_iface *recv_if); |
43 | void bonding_candidate_del(struct orig_node *orig_node, | ||
44 | struct neigh_node *neigh_node); | ||
44 | 45 | ||
45 | #endif /* _NET_BATMAN_ADV_ROUTING_H_ */ | 46 | #endif /* _NET_BATMAN_ADV_ROUTING_H_ */ |
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c index 831427694fc2..d49e54d932af 100644 --- a/net/batman-adv/send.c +++ b/net/batman-adv/send.c | |||
@@ -56,20 +56,20 @@ static unsigned long forward_send_time(void) | |||
56 | /* send out an already prepared packet to the given address via the | 56 | /* send out an already prepared packet to the given address via the |
57 | * specified batman interface */ | 57 | * specified batman interface */ |
58 | int send_skb_packet(struct sk_buff *skb, | 58 | int send_skb_packet(struct sk_buff *skb, |
59 | struct batman_if *batman_if, | 59 | struct hard_iface *hard_iface, |
60 | uint8_t *dst_addr) | 60 | uint8_t *dst_addr) |
61 | { | 61 | { |
62 | struct ethhdr *ethhdr; | 62 | struct ethhdr *ethhdr; |
63 | 63 | ||
64 | if (batman_if->if_status != IF_ACTIVE) | 64 | if (hard_iface->if_status != IF_ACTIVE) |
65 | goto send_skb_err; | 65 | goto send_skb_err; |
66 | 66 | ||
67 | if (unlikely(!batman_if->net_dev)) | 67 | if (unlikely(!hard_iface->net_dev)) |
68 | goto send_skb_err; | 68 | goto send_skb_err; |
69 | 69 | ||
70 | if (!(batman_if->net_dev->flags & IFF_UP)) { | 70 | if (!(hard_iface->net_dev->flags & IFF_UP)) { |
71 | pr_warning("Interface %s is not up - can't send packet via " | 71 | pr_warning("Interface %s is not up - can't send packet via " |
72 | "that interface!\n", batman_if->net_dev->name); | 72 | "that interface!\n", hard_iface->net_dev->name); |
73 | goto send_skb_err; | 73 | goto send_skb_err; |
74 | } | 74 | } |
75 | 75 | ||
@@ -80,7 +80,7 @@ int send_skb_packet(struct sk_buff *skb, | |||
80 | skb_reset_mac_header(skb); | 80 | skb_reset_mac_header(skb); |
81 | 81 | ||
82 | ethhdr = (struct ethhdr *) skb_mac_header(skb); | 82 | ethhdr = (struct ethhdr *) skb_mac_header(skb); |
83 | memcpy(ethhdr->h_source, batman_if->net_dev->dev_addr, ETH_ALEN); | 83 | memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN); |
84 | memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN); | 84 | memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN); |
85 | ethhdr->h_proto = __constant_htons(ETH_P_BATMAN); | 85 | ethhdr->h_proto = __constant_htons(ETH_P_BATMAN); |
86 | 86 | ||
@@ -88,7 +88,7 @@ int send_skb_packet(struct sk_buff *skb, | |||
88 | skb->priority = TC_PRIO_CONTROL; | 88 | skb->priority = TC_PRIO_CONTROL; |
89 | skb->protocol = __constant_htons(ETH_P_BATMAN); | 89 | skb->protocol = __constant_htons(ETH_P_BATMAN); |
90 | 90 | ||
91 | skb->dev = batman_if->net_dev; | 91 | skb->dev = hard_iface->net_dev; |
92 | 92 | ||
93 | /* dev_queue_xmit() returns a negative result on error. However on | 93 | /* dev_queue_xmit() returns a negative result on error. However on |
94 | * congestion and traffic shaping, it drops and returns NET_XMIT_DROP | 94 | * congestion and traffic shaping, it drops and returns NET_XMIT_DROP |
@@ -102,16 +102,16 @@ send_skb_err: | |||
102 | 102 | ||
103 | /* Send a packet to a given interface */ | 103 | /* Send a packet to a given interface */ |
104 | static void send_packet_to_if(struct forw_packet *forw_packet, | 104 | static void send_packet_to_if(struct forw_packet *forw_packet, |
105 | struct batman_if *batman_if) | 105 | struct hard_iface *hard_iface) |
106 | { | 106 | { |
107 | struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface); | 107 | struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); |
108 | char *fwd_str; | 108 | char *fwd_str; |
109 | uint8_t packet_num; | 109 | uint8_t packet_num; |
110 | int16_t buff_pos; | 110 | int16_t buff_pos; |
111 | struct batman_packet *batman_packet; | 111 | struct batman_packet *batman_packet; |
112 | struct sk_buff *skb; | 112 | struct sk_buff *skb; |
113 | 113 | ||
114 | if (batman_if->if_status != IF_ACTIVE) | 114 | if (hard_iface->if_status != IF_ACTIVE) |
115 | return; | 115 | return; |
116 | 116 | ||
117 | packet_num = 0; | 117 | packet_num = 0; |
@@ -126,7 +126,7 @@ static void send_packet_to_if(struct forw_packet *forw_packet, | |||
126 | /* we might have aggregated direct link packets with an | 126 | /* we might have aggregated direct link packets with an |
127 | * ordinary base packet */ | 127 | * ordinary base packet */ |
128 | if ((forw_packet->direct_link_flags & (1 << packet_num)) && | 128 | if ((forw_packet->direct_link_flags & (1 << packet_num)) && |
129 | (forw_packet->if_incoming == batman_if)) | 129 | (forw_packet->if_incoming == hard_iface)) |
130 | batman_packet->flags |= DIRECTLINK; | 130 | batman_packet->flags |= DIRECTLINK; |
131 | else | 131 | else |
132 | batman_packet->flags &= ~DIRECTLINK; | 132 | batman_packet->flags &= ~DIRECTLINK; |
@@ -142,7 +142,8 @@ static void send_packet_to_if(struct forw_packet *forw_packet, | |||
142 | batman_packet->tq, batman_packet->ttl, | 142 | batman_packet->tq, batman_packet->ttl, |
143 | (batman_packet->flags & DIRECTLINK ? | 143 | (batman_packet->flags & DIRECTLINK ? |
144 | "on" : "off"), | 144 | "on" : "off"), |
145 | batman_if->net_dev->name, batman_if->net_dev->dev_addr); | 145 | hard_iface->net_dev->name, |
146 | hard_iface->net_dev->dev_addr); | ||
146 | 147 | ||
147 | buff_pos += sizeof(struct batman_packet) + | 148 | buff_pos += sizeof(struct batman_packet) + |
148 | (batman_packet->num_hna * ETH_ALEN); | 149 | (batman_packet->num_hna * ETH_ALEN); |
@@ -154,13 +155,13 @@ static void send_packet_to_if(struct forw_packet *forw_packet, | |||
154 | /* create clone because function is called more than once */ | 155 | /* create clone because function is called more than once */ |
155 | skb = skb_clone(forw_packet->skb, GFP_ATOMIC); | 156 | skb = skb_clone(forw_packet->skb, GFP_ATOMIC); |
156 | if (skb) | 157 | if (skb) |
157 | send_skb_packet(skb, batman_if, broadcast_addr); | 158 | send_skb_packet(skb, hard_iface, broadcast_addr); |
158 | } | 159 | } |
159 | 160 | ||
160 | /* send a batman packet */ | 161 | /* send a batman packet */ |
161 | static void send_packet(struct forw_packet *forw_packet) | 162 | static void send_packet(struct forw_packet *forw_packet) |
162 | { | 163 | { |
163 | struct batman_if *batman_if; | 164 | struct hard_iface *hard_iface; |
164 | struct net_device *soft_iface; | 165 | struct net_device *soft_iface; |
165 | struct bat_priv *bat_priv; | 166 | struct bat_priv *bat_priv; |
166 | struct batman_packet *batman_packet = | 167 | struct batman_packet *batman_packet = |
@@ -204,17 +205,17 @@ static void send_packet(struct forw_packet *forw_packet) | |||
204 | 205 | ||
205 | /* broadcast on every interface */ | 206 | /* broadcast on every interface */ |
206 | rcu_read_lock(); | 207 | rcu_read_lock(); |
207 | list_for_each_entry_rcu(batman_if, &if_list, list) { | 208 | list_for_each_entry_rcu(hard_iface, &hardif_list, list) { |
208 | if (batman_if->soft_iface != soft_iface) | 209 | if (hard_iface->soft_iface != soft_iface) |
209 | continue; | 210 | continue; |
210 | 211 | ||
211 | send_packet_to_if(forw_packet, batman_if); | 212 | send_packet_to_if(forw_packet, hard_iface); |
212 | } | 213 | } |
213 | rcu_read_unlock(); | 214 | rcu_read_unlock(); |
214 | } | 215 | } |
215 | 216 | ||
216 | static void rebuild_batman_packet(struct bat_priv *bat_priv, | 217 | static void rebuild_batman_packet(struct bat_priv *bat_priv, |
217 | struct batman_if *batman_if) | 218 | struct hard_iface *hard_iface) |
218 | { | 219 | { |
219 | int new_len; | 220 | int new_len; |
220 | unsigned char *new_buff; | 221 | unsigned char *new_buff; |
@@ -226,7 +227,7 @@ static void rebuild_batman_packet(struct bat_priv *bat_priv, | |||
226 | 227 | ||
227 | /* keep the old buffer in case kmalloc fails */ | 228 | /* keep the old buffer in case kmalloc fails */ |
228 | if (new_buff) { | 229 | if (new_buff) { |
229 | memcpy(new_buff, batman_if->packet_buff, | 230 | memcpy(new_buff, hard_iface->packet_buff, |
230 | sizeof(struct batman_packet)); | 231 | sizeof(struct batman_packet)); |
231 | batman_packet = (struct batman_packet *)new_buff; | 232 | batman_packet = (struct batman_packet *)new_buff; |
232 | 233 | ||
@@ -234,21 +235,21 @@ static void rebuild_batman_packet(struct bat_priv *bat_priv, | |||
234 | new_buff + sizeof(struct batman_packet), | 235 | new_buff + sizeof(struct batman_packet), |
235 | new_len - sizeof(struct batman_packet)); | 236 | new_len - sizeof(struct batman_packet)); |
236 | 237 | ||
237 | kfree(batman_if->packet_buff); | 238 | kfree(hard_iface->packet_buff); |
238 | batman_if->packet_buff = new_buff; | 239 | hard_iface->packet_buff = new_buff; |
239 | batman_if->packet_len = new_len; | 240 | hard_iface->packet_len = new_len; |
240 | } | 241 | } |
241 | } | 242 | } |
242 | 243 | ||
243 | void schedule_own_packet(struct batman_if *batman_if) | 244 | void schedule_own_packet(struct hard_iface *hard_iface) |
244 | { | 245 | { |
245 | struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface); | 246 | struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); |
246 | unsigned long send_time; | 247 | unsigned long send_time; |
247 | struct batman_packet *batman_packet; | 248 | struct batman_packet *batman_packet; |
248 | int vis_server; | 249 | int vis_server; |
249 | 250 | ||
250 | if ((batman_if->if_status == IF_NOT_IN_USE) || | 251 | if ((hard_iface->if_status == IF_NOT_IN_USE) || |
251 | (batman_if->if_status == IF_TO_BE_REMOVED)) | 252 | (hard_iface->if_status == IF_TO_BE_REMOVED)) |
252 | return; | 253 | return; |
253 | 254 | ||
254 | vis_server = atomic_read(&bat_priv->vis_mode); | 255 | vis_server = atomic_read(&bat_priv->vis_mode); |
@@ -260,51 +261,51 @@ void schedule_own_packet(struct batman_if *batman_if) | |||
260 | * outdated packets (especially uninitialized mac addresses) in the | 261 | * outdated packets (especially uninitialized mac addresses) in the |
261 | * packet queue | 262 | * packet queue |
262 | */ | 263 | */ |
263 | if (batman_if->if_status == IF_TO_BE_ACTIVATED) | 264 | if (hard_iface->if_status == IF_TO_BE_ACTIVATED) |
264 | batman_if->if_status = IF_ACTIVE; | 265 | hard_iface->if_status = IF_ACTIVE; |
265 | 266 | ||
266 | /* if local hna has changed and interface is a primary interface */ | 267 | /* if local hna has changed and interface is a primary interface */ |
267 | if ((atomic_read(&bat_priv->hna_local_changed)) && | 268 | if ((atomic_read(&bat_priv->hna_local_changed)) && |
268 | (batman_if == bat_priv->primary_if)) | 269 | (hard_iface == bat_priv->primary_if)) |
269 | rebuild_batman_packet(bat_priv, batman_if); | 270 | rebuild_batman_packet(bat_priv, hard_iface); |
270 | 271 | ||
271 | /** | 272 | /** |
272 | * NOTE: packet_buff might just have been re-allocated in | 273 | * NOTE: packet_buff might just have been re-allocated in |
273 | * rebuild_batman_packet() | 274 | * rebuild_batman_packet() |
274 | */ | 275 | */ |
275 | batman_packet = (struct batman_packet *)batman_if->packet_buff; | 276 | batman_packet = (struct batman_packet *)hard_iface->packet_buff; |
276 | 277 | ||
277 | /* change sequence number to network order */ | 278 | /* change sequence number to network order */ |
278 | batman_packet->seqno = | 279 | batman_packet->seqno = |
279 | htonl((uint32_t)atomic_read(&batman_if->seqno)); | 280 | htonl((uint32_t)atomic_read(&hard_iface->seqno)); |
280 | 281 | ||
281 | if (vis_server == VIS_TYPE_SERVER_SYNC) | 282 | if (vis_server == VIS_TYPE_SERVER_SYNC) |
282 | batman_packet->flags |= VIS_SERVER; | 283 | batman_packet->flags |= VIS_SERVER; |
283 | else | 284 | else |
284 | batman_packet->flags &= ~VIS_SERVER; | 285 | batman_packet->flags &= ~VIS_SERVER; |
285 | 286 | ||
286 | if ((batman_if == bat_priv->primary_if) && | 287 | if ((hard_iface == bat_priv->primary_if) && |
287 | (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER)) | 288 | (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER)) |
288 | batman_packet->gw_flags = | 289 | batman_packet->gw_flags = |
289 | (uint8_t)atomic_read(&bat_priv->gw_bandwidth); | 290 | (uint8_t)atomic_read(&bat_priv->gw_bandwidth); |
290 | else | 291 | else |
291 | batman_packet->gw_flags = 0; | 292 | batman_packet->gw_flags = 0; |
292 | 293 | ||
293 | atomic_inc(&batman_if->seqno); | 294 | atomic_inc(&hard_iface->seqno); |
294 | 295 | ||
295 | slide_own_bcast_window(batman_if); | 296 | slide_own_bcast_window(hard_iface); |
296 | send_time = own_send_time(bat_priv); | 297 | send_time = own_send_time(bat_priv); |
297 | add_bat_packet_to_list(bat_priv, | 298 | add_bat_packet_to_list(bat_priv, |
298 | batman_if->packet_buff, | 299 | hard_iface->packet_buff, |
299 | batman_if->packet_len, | 300 | hard_iface->packet_len, |
300 | batman_if, 1, send_time); | 301 | hard_iface, 1, send_time); |
301 | } | 302 | } |
302 | 303 | ||
303 | void schedule_forward_packet(struct orig_node *orig_node, | 304 | void schedule_forward_packet(struct orig_node *orig_node, |
304 | struct ethhdr *ethhdr, | 305 | struct ethhdr *ethhdr, |
305 | struct batman_packet *batman_packet, | 306 | struct batman_packet *batman_packet, |
306 | uint8_t directlink, int hna_buff_len, | 307 | uint8_t directlink, int hna_buff_len, |
307 | struct batman_if *if_incoming) | 308 | struct hard_iface *if_incoming) |
308 | { | 309 | { |
309 | struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); | 310 | struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); |
310 | unsigned char in_tq, in_ttl, tq_avg = 0; | 311 | unsigned char in_tq, in_ttl, tq_avg = 0; |
@@ -326,7 +327,7 @@ void schedule_forward_packet(struct orig_node *orig_node, | |||
326 | if ((orig_node->router) && (orig_node->router->tq_avg != 0)) { | 327 | if ((orig_node->router) && (orig_node->router->tq_avg != 0)) { |
327 | 328 | ||
328 | /* rebroadcast ogm of best ranking neighbor as is */ | 329 | /* rebroadcast ogm of best ranking neighbor as is */ |
329 | if (!compare_orig(orig_node->router->addr, ethhdr->h_source)) { | 330 | if (!compare_eth(orig_node->router->addr, ethhdr->h_source)) { |
330 | batman_packet->tq = orig_node->router->tq_avg; | 331 | batman_packet->tq = orig_node->router->tq_avg; |
331 | 332 | ||
332 | if (orig_node->router->last_ttl) | 333 | if (orig_node->router->last_ttl) |
@@ -443,7 +444,7 @@ out: | |||
443 | 444 | ||
444 | static void send_outstanding_bcast_packet(struct work_struct *work) | 445 | static void send_outstanding_bcast_packet(struct work_struct *work) |
445 | { | 446 | { |
446 | struct batman_if *batman_if; | 447 | struct hard_iface *hard_iface; |
447 | struct delayed_work *delayed_work = | 448 | struct delayed_work *delayed_work = |
448 | container_of(work, struct delayed_work, work); | 449 | container_of(work, struct delayed_work, work); |
449 | struct forw_packet *forw_packet = | 450 | struct forw_packet *forw_packet = |
@@ -461,14 +462,14 @@ static void send_outstanding_bcast_packet(struct work_struct *work) | |||
461 | 462 | ||
462 | /* rebroadcast packet */ | 463 | /* rebroadcast packet */ |
463 | rcu_read_lock(); | 464 | rcu_read_lock(); |
464 | list_for_each_entry_rcu(batman_if, &if_list, list) { | 465 | list_for_each_entry_rcu(hard_iface, &hardif_list, list) { |
465 | if (batman_if->soft_iface != soft_iface) | 466 | if (hard_iface->soft_iface != soft_iface) |
466 | continue; | 467 | continue; |
467 | 468 | ||
468 | /* send a copy of the saved skb */ | 469 | /* send a copy of the saved skb */ |
469 | skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC); | 470 | skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC); |
470 | if (skb1) | 471 | if (skb1) |
471 | send_skb_packet(skb1, batman_if, broadcast_addr); | 472 | send_skb_packet(skb1, hard_iface, broadcast_addr); |
472 | } | 473 | } |
473 | rcu_read_unlock(); | 474 | rcu_read_unlock(); |
474 | 475 | ||
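The clone per interface is deliberate: send_skb_packet() consumes the skb it is handed, while skb_clone() duplicates only the sk_buff header and shares the payload, so the copy stored in the forw_packet survives for the next scheduled rebroadcast. In isolation (a sketch reusing the patch's own helpers, batman-adv context assumed):

	/* Transmit a shared-payload copy, keeping @orig for later rounds.
	 * On allocation failure this transmission is simply skipped. */
	static void xmit_clone(struct sk_buff *orig, struct hard_iface *iface)
	{
		struct sk_buff *copy = skb_clone(orig, GFP_ATOMIC);

		if (copy)
			send_skb_packet(copy, iface, broadcast_addr);
	}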
@@ -521,15 +522,15 @@ out: | |||
521 | } | 522 | } |
522 | 523 | ||
523 | void purge_outstanding_packets(struct bat_priv *bat_priv, | 524 | void purge_outstanding_packets(struct bat_priv *bat_priv, |
524 | struct batman_if *batman_if) | 525 | struct hard_iface *hard_iface) |
525 | { | 526 | { |
526 | struct forw_packet *forw_packet; | 527 | struct forw_packet *forw_packet; |
527 | struct hlist_node *tmp_node, *safe_tmp_node; | 528 | struct hlist_node *tmp_node, *safe_tmp_node; |
528 | 529 | ||
529 | if (batman_if) | 530 | if (hard_iface) |
530 | bat_dbg(DBG_BATMAN, bat_priv, | 531 | bat_dbg(DBG_BATMAN, bat_priv, |
531 | "purge_outstanding_packets(): %s\n", | 532 | "purge_outstanding_packets(): %s\n", |
532 | batman_if->net_dev->name); | 533 | hard_iface->net_dev->name); |
533 | else | 534 | else |
534 | bat_dbg(DBG_BATMAN, bat_priv, | 535 | bat_dbg(DBG_BATMAN, bat_priv, |
535 | "purge_outstanding_packets()\n"); | 536 | "purge_outstanding_packets()\n"); |
@@ -543,8 +544,8 @@ void purge_outstanding_packets(struct bat_priv *bat_priv, | |||
543 | * if purge_outstanding_packets() was called with an argument | 544 | * if purge_outstanding_packets() was called with an argument |
544 | * we delete only packets belonging to the given interface | 545 | * we delete only packets belonging to the given interface |
545 | */ | 546 | */ |
546 | if ((batman_if) && | 547 | if ((hard_iface) && |
547 | (forw_packet->if_incoming != batman_if)) | 548 | (forw_packet->if_incoming != hard_iface)) |
548 | continue; | 549 | continue; |
549 | 550 | ||
550 | spin_unlock_bh(&bat_priv->forw_bcast_list_lock); | 551 | spin_unlock_bh(&bat_priv->forw_bcast_list_lock); |
@@ -567,8 +568,8 @@ void purge_outstanding_packets(struct bat_priv *bat_priv, | |||
567 | * if purge_outstanding_packets() was called with an argument | 568 | * if purge_outstanding_packets() was called with an argument |
568 | * we delete only packets belonging to the given interface | 569 | * we delete only packets belonging to the given interface |
569 | */ | 570 | */ |
570 | if ((batman_if) && | 571 | if ((hard_iface) && |
571 | (forw_packet->if_incoming != batman_if)) | 572 | (forw_packet->if_incoming != hard_iface)) |
572 | continue; | 573 | continue; |
573 | 574 | ||
574 | spin_unlock_bh(&bat_priv->forw_bat_list_lock); | 575 | spin_unlock_bh(&bat_priv->forw_bat_list_lock); |
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h index b68c272cb84f..7b2ff19c05e7 100644 --- a/net/batman-adv/send.h +++ b/net/batman-adv/send.h | |||
@@ -23,17 +23,17 @@ | |||
23 | #define _NET_BATMAN_ADV_SEND_H_ | 23 | #define _NET_BATMAN_ADV_SEND_H_ |
24 | 24 | ||
25 | int send_skb_packet(struct sk_buff *skb, | 25 | int send_skb_packet(struct sk_buff *skb, |
26 | struct batman_if *batman_if, | 26 | struct hard_iface *hard_iface, |
27 | uint8_t *dst_addr); | 27 | uint8_t *dst_addr); |
28 | void schedule_own_packet(struct batman_if *batman_if); | 28 | void schedule_own_packet(struct hard_iface *hard_iface); |
29 | void schedule_forward_packet(struct orig_node *orig_node, | 29 | void schedule_forward_packet(struct orig_node *orig_node, |
30 | struct ethhdr *ethhdr, | 30 | struct ethhdr *ethhdr, |
31 | struct batman_packet *batman_packet, | 31 | struct batman_packet *batman_packet, |
32 | uint8_t directlink, int hna_buff_len, | 32 | uint8_t directlink, int hna_buff_len, |
33 | struct batman_if *if_outgoing); | 33 | struct hard_iface *if_outgoing); |
34 | int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb); | 34 | int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb); |
35 | void send_outstanding_bat_packet(struct work_struct *work); | 35 | void send_outstanding_bat_packet(struct work_struct *work); |
36 | void purge_outstanding_packets(struct bat_priv *bat_priv, | 36 | void purge_outstanding_packets(struct bat_priv *bat_priv, |
37 | struct batman_if *batman_if); | 37 | struct hard_iface *hard_iface); |
38 | 38 | ||
39 | #endif /* _NET_BATMAN_ADV_SEND_H_ */ | 39 | #endif /* _NET_BATMAN_ADV_SEND_H_ */ |
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index bd088f877e38..9ed26140a269 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c | |||
@@ -29,14 +29,12 @@ | |||
29 | #include "hash.h" | 29 | #include "hash.h" |
30 | #include "gateway_common.h" | 30 | #include "gateway_common.h" |
31 | #include "gateway_client.h" | 31 | #include "gateway_client.h" |
32 | #include "send.h" | ||
33 | #include "bat_sysfs.h" | 32 | #include "bat_sysfs.h" |
34 | #include <linux/slab.h> | 33 | #include <linux/slab.h> |
35 | #include <linux/ethtool.h> | 34 | #include <linux/ethtool.h> |
36 | #include <linux/etherdevice.h> | 35 | #include <linux/etherdevice.h> |
37 | #include <linux/if_vlan.h> | 36 | #include <linux/if_vlan.h> |
38 | #include "unicast.h" | 37 | #include "unicast.h" |
39 | #include "routing.h" | ||
40 | 38 | ||
41 | 39 | ||
42 | static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd); | 40 | static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd); |
@@ -78,20 +76,18 @@ int my_skb_head_push(struct sk_buff *skb, unsigned int len) | |||
78 | return 0; | 76 | return 0; |
79 | } | 77 | } |
80 | 78 | ||
81 | static void softif_neigh_free_ref(struct kref *refcount) | 79 | static void softif_neigh_free_rcu(struct rcu_head *rcu) |
82 | { | 80 | { |
83 | struct softif_neigh *softif_neigh; | 81 | struct softif_neigh *softif_neigh; |
84 | 82 | ||
85 | softif_neigh = container_of(refcount, struct softif_neigh, refcount); | 83 | softif_neigh = container_of(rcu, struct softif_neigh, rcu); |
86 | kfree(softif_neigh); | 84 | kfree(softif_neigh); |
87 | } | 85 | } |
88 | 86 | ||
89 | static void softif_neigh_free_rcu(struct rcu_head *rcu) | 87 | static void softif_neigh_free_ref(struct softif_neigh *softif_neigh) |
90 | { | 88 | { |
91 | struct softif_neigh *softif_neigh; | 89 | if (atomic_dec_and_test(&softif_neigh->refcount)) |
92 | 90 | call_rcu(&softif_neigh->rcu, softif_neigh_free_rcu); | |
93 | softif_neigh = container_of(rcu, struct softif_neigh, rcu); | ||
94 | kref_put(&softif_neigh->refcount, softif_neigh_free_ref); | ||
95 | } | 91 | } |
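Dropping kref for a bare atomic_t is what lets the final put defer the kfree() through call_rcu(): a reader that found the object inside an RCU read-side section keeps a valid pointer until the grace period ends. The reader-side counterpart looks like this lookup (a sketch condensed from softif_neigh_get() below):

	static struct softif_neigh *neigh_find(struct bat_priv *bat_priv)
	{
		struct softif_neigh *softif_neigh;
		struct hlist_node *node;

		rcu_read_lock();
		hlist_for_each_entry_rcu(softif_neigh, node,
					 &bat_priv->softif_neigh_list, list) {
			if (!atomic_inc_not_zero(&softif_neigh->refcount))
				continue;	/* dying entry: never revive */
			rcu_read_unlock();
			return softif_neigh;	/* reference held for caller */
		}
		rcu_read_unlock();
		return NULL;
	}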
96 | 92 | ||
97 | void softif_neigh_purge(struct bat_priv *bat_priv) | 93 | void softif_neigh_purge(struct bat_priv *bat_priv) |
@@ -118,11 +114,10 @@ void softif_neigh_purge(struct bat_priv *bat_priv) | |||
118 | softif_neigh->addr, softif_neigh->vid); | 114 | softif_neigh->addr, softif_neigh->vid); |
119 | softif_neigh_tmp = bat_priv->softif_neigh; | 115 | softif_neigh_tmp = bat_priv->softif_neigh; |
120 | bat_priv->softif_neigh = NULL; | 116 | bat_priv->softif_neigh = NULL; |
121 | kref_put(&softif_neigh_tmp->refcount, | 117 | softif_neigh_free_ref(softif_neigh_tmp); |
122 | softif_neigh_free_ref); | ||
123 | } | 118 | } |
124 | 119 | ||
125 | call_rcu(&softif_neigh->rcu, softif_neigh_free_rcu); | 120 | softif_neigh_free_ref(softif_neigh); |
126 | } | 121 | } |
127 | 122 | ||
128 | spin_unlock_bh(&bat_priv->softif_neigh_lock); | 123 | spin_unlock_bh(&bat_priv->softif_neigh_lock); |
@@ -137,14 +132,17 @@ static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv, | |||
137 | rcu_read_lock(); | 132 | rcu_read_lock(); |
138 | hlist_for_each_entry_rcu(softif_neigh, node, | 133 | hlist_for_each_entry_rcu(softif_neigh, node, |
139 | &bat_priv->softif_neigh_list, list) { | 134 | &bat_priv->softif_neigh_list, list) { |
140 | if (memcmp(softif_neigh->addr, addr, ETH_ALEN) != 0) | 135 | if (!compare_eth(softif_neigh->addr, addr)) |
141 | continue; | 136 | continue; |
142 | 137 | ||
143 | if (softif_neigh->vid != vid) | 138 | if (softif_neigh->vid != vid) |
144 | continue; | 139 | continue; |
145 | 140 | ||
141 | if (!atomic_inc_not_zero(&softif_neigh->refcount)) | ||
142 | continue; | ||
143 | |||
146 | softif_neigh->last_seen = jiffies; | 144 | softif_neigh->last_seen = jiffies; |
147 | goto found; | 145 | goto out; |
148 | } | 146 | } |
149 | 147 | ||
150 | softif_neigh = kzalloc(sizeof(struct softif_neigh), GFP_ATOMIC); | 148 | softif_neigh = kzalloc(sizeof(struct softif_neigh), GFP_ATOMIC); |
@@ -154,15 +152,14 @@ static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv, | |||
154 | memcpy(softif_neigh->addr, addr, ETH_ALEN); | 152 | memcpy(softif_neigh->addr, addr, ETH_ALEN); |
155 | softif_neigh->vid = vid; | 153 | softif_neigh->vid = vid; |
156 | softif_neigh->last_seen = jiffies; | 154 | softif_neigh->last_seen = jiffies; |
157 | kref_init(&softif_neigh->refcount); | 155 | /* initialize with 2 - caller decrements counter by one */ |
156 | atomic_set(&softif_neigh->refcount, 2); | ||
158 | 157 | ||
159 | INIT_HLIST_NODE(&softif_neigh->list); | 158 | INIT_HLIST_NODE(&softif_neigh->list); |
160 | spin_lock_bh(&bat_priv->softif_neigh_lock); | 159 | spin_lock_bh(&bat_priv->softif_neigh_lock); |
161 | hlist_add_head_rcu(&softif_neigh->list, &bat_priv->softif_neigh_list); | 160 | hlist_add_head_rcu(&softif_neigh->list, &bat_priv->softif_neigh_list); |
162 | spin_unlock_bh(&bat_priv->softif_neigh_lock); | 161 | spin_unlock_bh(&bat_priv->softif_neigh_lock); |
163 | 162 | ||
164 | found: | ||
165 | kref_get(&softif_neigh->refcount); | ||
166 | out: | 163 | out: |
167 | rcu_read_unlock(); | 164 | rcu_read_unlock(); |
168 | return softif_neigh; | 165 | return softif_neigh; |
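Initialising the counter to 2 folds both owners into one step, one reference for the hash list and one for the caller, which is why the unconditional kref_get() at the old found: label could disappear. The allocation half in isolation (generic names, a sketch rather than the batman-adv code):

	#include <linux/rculist.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct node {
		struct hlist_node list;
		atomic_t refcount;
	};

	static struct node *node_create(struct hlist_head *head,
					spinlock_t *lock)
	{
		struct node *n = kzalloc(sizeof(*n), GFP_ATOMIC);

		if (!n)
			return NULL;

		/* one reference for the list, one for the caller */
		atomic_set(&n->refcount, 2);

		spin_lock_bh(lock);
		hlist_add_head_rcu(&n->list, head);
		spin_unlock_bh(lock);

		return n;
	}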
@@ -174,8 +171,6 @@ int softif_neigh_seq_print_text(struct seq_file *seq, void *offset) | |||
174 | struct bat_priv *bat_priv = netdev_priv(net_dev); | 171 | struct bat_priv *bat_priv = netdev_priv(net_dev); |
175 | struct softif_neigh *softif_neigh; | 172 | struct softif_neigh *softif_neigh; |
176 | struct hlist_node *node; | 173 | struct hlist_node *node; |
177 | size_t buf_size, pos; | ||
178 | char *buff; | ||
179 | 174 | ||
180 | if (!bat_priv->primary_if) { | 175 | if (!bat_priv->primary_if) { |
181 | return seq_printf(seq, "BATMAN mesh %s disabled - " | 176 | return seq_printf(seq, "BATMAN mesh %s disabled - " |
@@ -185,33 +180,15 @@ int softif_neigh_seq_print_text(struct seq_file *seq, void *offset) | |||
185 | 180 | ||
186 | seq_printf(seq, "Softif neighbor list (%s)\n", net_dev->name); | 181 | seq_printf(seq, "Softif neighbor list (%s)\n", net_dev->name); |
187 | 182 | ||
188 | buf_size = 1; | ||
189 | /* Estimate length for: " xx:xx:xx:xx:xx:xx\n" */ | ||
190 | rcu_read_lock(); | 183 | rcu_read_lock(); |
191 | hlist_for_each_entry_rcu(softif_neigh, node, | 184 | hlist_for_each_entry_rcu(softif_neigh, node, |
192 | &bat_priv->softif_neigh_list, list) | 185 | &bat_priv->softif_neigh_list, list) |
193 | buf_size += 30; | 186 | seq_printf(seq, "%s %pM (vid: %d)\n", |
194 | rcu_read_unlock(); | ||
195 | |||
196 | buff = kmalloc(buf_size, GFP_ATOMIC); | ||
197 | if (!buff) | ||
198 | return -ENOMEM; | ||
199 | |||
200 | buff[0] = '\0'; | ||
201 | pos = 0; | ||
202 | |||
203 | rcu_read_lock(); | ||
204 | hlist_for_each_entry_rcu(softif_neigh, node, | ||
205 | &bat_priv->softif_neigh_list, list) { | ||
206 | pos += snprintf(buff + pos, 31, "%s %pM (vid: %d)\n", | ||
207 | bat_priv->softif_neigh == softif_neigh | 187 | bat_priv->softif_neigh == softif_neigh |
208 | ? "=>" : " ", softif_neigh->addr, | 188 | ? "=>" : " ", softif_neigh->addr, |
209 | softif_neigh->vid); | 189 | softif_neigh->vid); |
210 | } | ||
211 | rcu_read_unlock(); | 190 | rcu_read_unlock(); |
212 | 191 | ||
213 | seq_printf(seq, "%s", buff); | ||
214 | kfree(buff); | ||
215 | return 0; | 192 | return 0; |
216 | } | 193 | } |
217 | 194 | ||
@@ -266,7 +243,7 @@ static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev, | |||
266 | softif_neigh->addr, softif_neigh->vid); | 243 | softif_neigh->addr, softif_neigh->vid); |
267 | softif_neigh_tmp = bat_priv->softif_neigh; | 244 | softif_neigh_tmp = bat_priv->softif_neigh; |
268 | bat_priv->softif_neigh = softif_neigh; | 245 | bat_priv->softif_neigh = softif_neigh; |
269 | kref_put(&softif_neigh_tmp->refcount, softif_neigh_free_ref); | 246 | softif_neigh_free_ref(softif_neigh_tmp); |
270 | /* we need to hold the additional reference */ | 247 | /* we need to hold the additional reference */ |
271 | goto err; | 248 | goto err; |
272 | } | 249 | } |
@@ -284,7 +261,7 @@ static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev, | |||
284 | } | 261 | } |
285 | 262 | ||
286 | out: | 263 | out: |
287 | kref_put(&softif_neigh->refcount, softif_neigh_free_ref); | 264 | softif_neigh_free_ref(softif_neigh); |
288 | err: | 265 | err: |
289 | kfree_skb(skb); | 266 | kfree_skb(skb); |
290 | return; | 267 | return; |
@@ -437,7 +414,7 @@ end: | |||
437 | } | 414 | } |
438 | 415 | ||
439 | void interface_rx(struct net_device *soft_iface, | 416 | void interface_rx(struct net_device *soft_iface, |
440 | struct sk_buff *skb, struct batman_if *recv_if, | 417 | struct sk_buff *skb, struct hard_iface *recv_if, |
441 | int hdr_size) | 418 | int hdr_size) |
442 | { | 419 | { |
443 | struct bat_priv *bat_priv = netdev_priv(soft_iface); | 420 | struct bat_priv *bat_priv = netdev_priv(soft_iface); |
@@ -485,7 +462,7 @@ void interface_rx(struct net_device *soft_iface, | |||
485 | 462 | ||
486 | memcpy(unicast_packet->dest, | 463 | memcpy(unicast_packet->dest, |
487 | bat_priv->softif_neigh->addr, ETH_ALEN); | 464 | bat_priv->softif_neigh->addr, ETH_ALEN); |
488 | ret = route_unicast_packet(skb, recv_if, hdr_size); | 465 | ret = route_unicast_packet(skb, recv_if); |
489 | if (ret == NET_RX_DROP) | 466 | if (ret == NET_RX_DROP) |
490 | goto dropped; | 467 | goto dropped; |
491 | 468 | ||
@@ -645,6 +622,19 @@ void softif_destroy(struct net_device *soft_iface) | |||
645 | unregister_netdevice(soft_iface); | 622 | unregister_netdevice(soft_iface); |
646 | } | 623 | } |
647 | 624 | ||
625 | int softif_is_valid(struct net_device *net_dev) | ||
626 | { | ||
627 | #ifdef HAVE_NET_DEVICE_OPS | ||
628 | if (net_dev->netdev_ops->ndo_start_xmit == interface_tx) | ||
629 | return 1; | ||
630 | #else | ||
631 | if (net_dev->hard_start_xmit == interface_tx) | ||
632 | return 1; | ||
633 | #endif | ||
634 | |||
635 | return 0; | ||
636 | } | ||
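Matching the transmit handler (ndo_start_xmit, or hard_start_xmit on kernels predating net_device_ops, which is what the HAVE_NET_DEVICE_OPS guard covers) identifies a batman soft interface by behaviour instead of by name. A caller might use it roughly like this; the check is illustrative, not taken from this patch:

	/* Hypothetical guard: refuse to enslave one mesh interface
	 * beneath another, whatever the device happens to be called. */
	if (softif_is_valid(net_dev)) {
		pr_err("batman-adv: refusing to use a batman soft interface "
		       "as a hard interface\n");
		return -EINVAL;
	}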
637 | |||
648 | /* ethtool */ | 638 | /* ethtool */ |
649 | static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | 639 | static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
650 | { | 640 | { |
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h index e7b0e1a34a55..4789b6f2a0b3 100644 --- a/net/batman-adv/soft-interface.h +++ b/net/batman-adv/soft-interface.h | |||
@@ -27,9 +27,10 @@ int softif_neigh_seq_print_text(struct seq_file *seq, void *offset); | |||
27 | void softif_neigh_purge(struct bat_priv *bat_priv); | 27 | void softif_neigh_purge(struct bat_priv *bat_priv); |
28 | int interface_tx(struct sk_buff *skb, struct net_device *soft_iface); | 28 | int interface_tx(struct sk_buff *skb, struct net_device *soft_iface); |
29 | void interface_rx(struct net_device *soft_iface, | 29 | void interface_rx(struct net_device *soft_iface, |
30 | struct sk_buff *skb, struct batman_if *recv_if, | 30 | struct sk_buff *skb, struct hard_iface *recv_if, |
31 | int hdr_size); | 31 | int hdr_size); |
32 | struct net_device *softif_create(char *name); | 32 | struct net_device *softif_create(char *name); |
33 | void softif_destroy(struct net_device *soft_iface); | 33 | void softif_destroy(struct net_device *soft_iface); |
34 | int softif_is_valid(struct net_device *net_dev); | ||
34 | 35 | ||
35 | #endif /* _NET_BATMAN_ADV_SOFT_INTERFACE_H_ */ | 36 | #endif /* _NET_BATMAN_ADV_SOFT_INTERFACE_H_ */ |
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 7fb6726ccbdd..8d15b48d1692 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c | |||
@@ -30,12 +30,85 @@ static void _hna_global_del_orig(struct bat_priv *bat_priv, | |||
30 | struct hna_global_entry *hna_global_entry, | 30 | struct hna_global_entry *hna_global_entry, |
31 | char *message); | 31 | char *message); |
32 | 32 | ||
33 | /* returns 1 if they are the same mac addr */ | ||
34 | static int compare_lhna(struct hlist_node *node, void *data2) | ||
35 | { | ||
36 | void *data1 = container_of(node, struct hna_local_entry, hash_entry); | ||
37 | |||
38 | return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); | ||
39 | } | ||
40 | |||
41 | /* returns 1 if they are the same mac addr */ | ||
42 | static int compare_ghna(struct hlist_node *node, void *data2) | ||
43 | { | ||
44 | void *data1 = container_of(node, struct hna_global_entry, hash_entry); | ||
45 | |||
46 | return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); | ||
47 | } | ||
48 | |||
33 | static void hna_local_start_timer(struct bat_priv *bat_priv) | 49 | static void hna_local_start_timer(struct bat_priv *bat_priv) |
34 | { | 50 | { |
35 | INIT_DELAYED_WORK(&bat_priv->hna_work, hna_local_purge); | 51 | INIT_DELAYED_WORK(&bat_priv->hna_work, hna_local_purge); |
36 | queue_delayed_work(bat_event_workqueue, &bat_priv->hna_work, 10 * HZ); | 52 | queue_delayed_work(bat_event_workqueue, &bat_priv->hna_work, 10 * HZ); |
37 | } | 53 | } |
38 | 54 | ||
55 | static struct hna_local_entry *hna_local_hash_find(struct bat_priv *bat_priv, | ||
56 | void *data) | ||
57 | { | ||
58 | struct hashtable_t *hash = bat_priv->hna_local_hash; | ||
59 | struct hlist_head *head; | ||
60 | struct hlist_node *node; | ||
61 | struct hna_local_entry *hna_local_entry, *hna_local_entry_tmp = NULL; | ||
62 | int index; | ||
63 | |||
64 | if (!hash) | ||
65 | return NULL; | ||
66 | |||
67 | index = choose_orig(data, hash->size); | ||
68 | head = &hash->table[index]; | ||
69 | |||
70 | rcu_read_lock(); | ||
71 | hlist_for_each_entry_rcu(hna_local_entry, node, head, hash_entry) { | ||
72 | if (!compare_eth(hna_local_entry, data)) | ||
73 | continue; | ||
74 | |||
75 | hna_local_entry_tmp = hna_local_entry; | ||
76 | break; | ||
77 | } | ||
78 | rcu_read_unlock(); | ||
79 | |||
80 | return hna_local_entry_tmp; | ||
81 | } | ||
82 | |||
83 | static struct hna_global_entry *hna_global_hash_find(struct bat_priv *bat_priv, | ||
84 | void *data) | ||
85 | { | ||
86 | struct hashtable_t *hash = bat_priv->hna_global_hash; | ||
87 | struct hlist_head *head; | ||
88 | struct hlist_node *node; | ||
89 | struct hna_global_entry *hna_global_entry; | ||
90 | struct hna_global_entry *hna_global_entry_tmp = NULL; | ||
91 | int index; | ||
92 | |||
93 | if (!hash) | ||
94 | return NULL; | ||
95 | |||
96 | index = choose_orig(data, hash->size); | ||
97 | head = &hash->table[index]; | ||
98 | |||
99 | rcu_read_lock(); | ||
100 | hlist_for_each_entry_rcu(hna_global_entry, node, head, hash_entry) { | ||
101 | if (!compare_eth(hna_global_entry, data)) | ||
102 | continue; | ||
103 | |||
104 | hna_global_entry_tmp = hna_global_entry; | ||
105 | break; | ||
106 | } | ||
107 | rcu_read_unlock(); | ||
108 | |||
109 | return hna_global_entry_tmp; | ||
110 | } | ||
111 | |||
39 | int hna_local_init(struct bat_priv *bat_priv) | 112 | int hna_local_init(struct bat_priv *bat_priv) |
40 | { | 113 | { |
41 | if (bat_priv->hna_local_hash) | 114 | if (bat_priv->hna_local_hash) |
@@ -60,10 +133,7 @@ void hna_local_add(struct net_device *soft_iface, uint8_t *addr) | |||
60 | int required_bytes; | 133 | int required_bytes; |
61 | 134 | ||
62 | spin_lock_bh(&bat_priv->hna_lhash_lock); | 135 | spin_lock_bh(&bat_priv->hna_lhash_lock); |
63 | hna_local_entry = | 136 | hna_local_entry = hna_local_hash_find(bat_priv, addr); |
64 | ((struct hna_local_entry *)hash_find(bat_priv->hna_local_hash, | ||
65 | compare_orig, choose_orig, | ||
66 | addr)); | ||
67 | spin_unlock_bh(&bat_priv->hna_lhash_lock); | 137 | spin_unlock_bh(&bat_priv->hna_lhash_lock); |
68 | 138 | ||
69 | if (hna_local_entry) { | 139 | if (hna_local_entry) { |
@@ -99,15 +169,15 @@ void hna_local_add(struct net_device *soft_iface, uint8_t *addr) | |||
99 | hna_local_entry->last_seen = jiffies; | 169 | hna_local_entry->last_seen = jiffies; |
100 | 170 | ||
101 | /* the batman interface mac address should never be purged */ | 171 | /* the batman interface mac address should never be purged */ |
102 | if (compare_orig(addr, soft_iface->dev_addr)) | 172 | if (compare_eth(addr, soft_iface->dev_addr)) |
103 | hna_local_entry->never_purge = 1; | 173 | hna_local_entry->never_purge = 1; |
104 | else | 174 | else |
105 | hna_local_entry->never_purge = 0; | 175 | hna_local_entry->never_purge = 0; |
106 | 176 | ||
107 | spin_lock_bh(&bat_priv->hna_lhash_lock); | 177 | spin_lock_bh(&bat_priv->hna_lhash_lock); |
108 | 178 | ||
109 | hash_add(bat_priv->hna_local_hash, compare_orig, choose_orig, | 179 | hash_add(bat_priv->hna_local_hash, compare_lhna, choose_orig, |
110 | hna_local_entry); | 180 | hna_local_entry, &hna_local_entry->hash_entry); |
111 | bat_priv->num_local_hna++; | 181 | bat_priv->num_local_hna++; |
112 | atomic_set(&bat_priv->hna_local_changed, 1); | 182 | atomic_set(&bat_priv->hna_local_changed, 1); |
113 | 183 | ||
@@ -116,9 +186,7 @@ void hna_local_add(struct net_device *soft_iface, uint8_t *addr) | |||
116 | /* remove address from global hash if present */ | 186 | /* remove address from global hash if present */ |
117 | spin_lock_bh(&bat_priv->hna_ghash_lock); | 187 | spin_lock_bh(&bat_priv->hna_ghash_lock); |
118 | 188 | ||
119 | hna_global_entry = ((struct hna_global_entry *) | 189 | hna_global_entry = hna_global_hash_find(bat_priv, addr); |
120 | hash_find(bat_priv->hna_global_hash, | ||
121 | compare_orig, choose_orig, addr)); | ||
122 | 190 | ||
123 | if (hna_global_entry) | 191 | if (hna_global_entry) |
124 | _hna_global_del_orig(bat_priv, hna_global_entry, | 192 | _hna_global_del_orig(bat_priv, hna_global_entry, |
@@ -132,28 +200,27 @@ int hna_local_fill_buffer(struct bat_priv *bat_priv, | |||
132 | { | 200 | { |
133 | struct hashtable_t *hash = bat_priv->hna_local_hash; | 201 | struct hashtable_t *hash = bat_priv->hna_local_hash; |
134 | struct hna_local_entry *hna_local_entry; | 202 | struct hna_local_entry *hna_local_entry; |
135 | struct element_t *bucket; | 203 | struct hlist_node *node; |
136 | int i; | ||
137 | struct hlist_node *walk; | ||
138 | struct hlist_head *head; | 204 | struct hlist_head *head; |
139 | int count = 0; | 205 | int i, count = 0; |
140 | 206 | ||
141 | spin_lock_bh(&bat_priv->hna_lhash_lock); | 207 | spin_lock_bh(&bat_priv->hna_lhash_lock); |
142 | 208 | ||
143 | for (i = 0; i < hash->size; i++) { | 209 | for (i = 0; i < hash->size; i++) { |
144 | head = &hash->table[i]; | 210 | head = &hash->table[i]; |
145 | 211 | ||
146 | hlist_for_each_entry(bucket, walk, head, hlist) { | 212 | rcu_read_lock(); |
147 | 213 | hlist_for_each_entry_rcu(hna_local_entry, node, | |
214 | head, hash_entry) { | ||
148 | if (buff_len < (count + 1) * ETH_ALEN) | 215 | if (buff_len < (count + 1) * ETH_ALEN) |
149 | break; | 216 | break; |
150 | 217 | ||
151 | hna_local_entry = bucket->data; | ||
152 | memcpy(buff + (count * ETH_ALEN), hna_local_entry->addr, | 218 | memcpy(buff + (count * ETH_ALEN), hna_local_entry->addr, |
153 | ETH_ALEN); | 219 | ETH_ALEN); |
154 | 220 | ||
155 | count++; | 221 | count++; |
156 | } | 222 | } |
223 | rcu_read_unlock(); | ||
157 | } | 224 | } |
158 | 225 | ||
159 | /* if we did not get all new local hnas see you next time ;-) */ | 226 | /* if we did not get all new local hnas see you next time ;-) */ |
@@ -170,12 +237,11 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset) | |||
170 | struct bat_priv *bat_priv = netdev_priv(net_dev); | 237 | struct bat_priv *bat_priv = netdev_priv(net_dev); |
171 | struct hashtable_t *hash = bat_priv->hna_local_hash; | 238 | struct hashtable_t *hash = bat_priv->hna_local_hash; |
172 | struct hna_local_entry *hna_local_entry; | 239 | struct hna_local_entry *hna_local_entry; |
173 | int i; | 240 | struct hlist_node *node; |
174 | struct hlist_node *walk; | ||
175 | struct hlist_head *head; | 241 | struct hlist_head *head; |
176 | struct element_t *bucket; | ||
177 | size_t buf_size, pos; | 242 | size_t buf_size, pos; |
178 | char *buff; | 243 | char *buff; |
244 | int i; | ||
179 | 245 | ||
180 | if (!bat_priv->primary_if) { | 246 | if (!bat_priv->primary_if) { |
181 | return seq_printf(seq, "BATMAN mesh %s disabled - " | 247 | return seq_printf(seq, "BATMAN mesh %s disabled - " |
@@ -194,8 +260,10 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset) | |||
194 | for (i = 0; i < hash->size; i++) { | 260 | for (i = 0; i < hash->size; i++) { |
195 | head = &hash->table[i]; | 261 | head = &hash->table[i]; |
196 | 262 | ||
197 | hlist_for_each(walk, head) | 263 | rcu_read_lock(); |
264 | __hlist_for_each_rcu(node, head) | ||
198 | buf_size += 21; | 265 | buf_size += 21; |
266 | rcu_read_unlock(); | ||
199 | } | 267 | } |
200 | 268 | ||
201 | buff = kmalloc(buf_size, GFP_ATOMIC); | 269 | buff = kmalloc(buf_size, GFP_ATOMIC); |
@@ -203,18 +271,20 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset) | |||
203 | spin_unlock_bh(&bat_priv->hna_lhash_lock); | 271 | spin_unlock_bh(&bat_priv->hna_lhash_lock); |
204 | return -ENOMEM; | 272 | return -ENOMEM; |
205 | } | 273 | } |
274 | |||
206 | buff[0] = '\0'; | 275 | buff[0] = '\0'; |
207 | pos = 0; | 276 | pos = 0; |
208 | 277 | ||
209 | for (i = 0; i < hash->size; i++) { | 278 | for (i = 0; i < hash->size; i++) { |
210 | head = &hash->table[i]; | 279 | head = &hash->table[i]; |
211 | 280 | ||
212 | hlist_for_each_entry(bucket, walk, head, hlist) { | 281 | rcu_read_lock(); |
213 | hna_local_entry = bucket->data; | 282 | hlist_for_each_entry_rcu(hna_local_entry, node, |
214 | 283 | head, hash_entry) { | |
215 | pos += snprintf(buff + pos, 22, " * %pM\n", | 284 | pos += snprintf(buff + pos, 22, " * %pM\n", |
216 | hna_local_entry->addr); | 285 | hna_local_entry->addr); |
217 | } | 286 | } |
287 | rcu_read_unlock(); | ||
218 | } | 288 | } |
219 | 289 | ||
220 | spin_unlock_bh(&bat_priv->hna_lhash_lock); | 290 | spin_unlock_bh(&bat_priv->hna_lhash_lock); |
@@ -224,9 +294,10 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset) | |||
224 | return 0; | 294 | return 0; |
225 | } | 295 | } |
226 | 296 | ||
227 | static void _hna_local_del(void *data, void *arg) | 297 | static void _hna_local_del(struct hlist_node *node, void *arg) |
228 | { | 298 | { |
229 | struct bat_priv *bat_priv = (struct bat_priv *)arg; | 299 | struct bat_priv *bat_priv = (struct bat_priv *)arg; |
300 | void *data = container_of(node, struct hna_local_entry, hash_entry); | ||
230 | 301 | ||
231 | kfree(data); | 302 | kfree(data); |
232 | bat_priv->num_local_hna--; | 303 | bat_priv->num_local_hna--; |
@@ -240,9 +311,9 @@ static void hna_local_del(struct bat_priv *bat_priv, | |||
240 | bat_dbg(DBG_ROUTES, bat_priv, "Deleting local hna entry (%pM): %s\n", | 311 | bat_dbg(DBG_ROUTES, bat_priv, "Deleting local hna entry (%pM): %s\n", |
241 | hna_local_entry->addr, message); | 312 | hna_local_entry->addr, message); |
242 | 313 | ||
243 | hash_remove(bat_priv->hna_local_hash, compare_orig, choose_orig, | 314 | hash_remove(bat_priv->hna_local_hash, compare_lhna, choose_orig, |
244 | hna_local_entry->addr); | 315 | hna_local_entry->addr); |
245 | _hna_local_del(hna_local_entry, bat_priv); | 316 | _hna_local_del(&hna_local_entry->hash_entry, bat_priv); |
246 | } | 317 | } |
247 | 318 | ||
248 | void hna_local_remove(struct bat_priv *bat_priv, | 319 | void hna_local_remove(struct bat_priv *bat_priv, |
@@ -252,9 +323,7 @@ void hna_local_remove(struct bat_priv *bat_priv, | |||
252 | 323 | ||
253 | spin_lock_bh(&bat_priv->hna_lhash_lock); | 324 | spin_lock_bh(&bat_priv->hna_lhash_lock); |
254 | 325 | ||
255 | hna_local_entry = (struct hna_local_entry *) | 326 | hna_local_entry = hna_local_hash_find(bat_priv, addr); |
256 | hash_find(bat_priv->hna_local_hash, compare_orig, choose_orig, | ||
257 | addr); | ||
258 | 327 | ||
259 | if (hna_local_entry) | 328 | if (hna_local_entry) |
260 | hna_local_del(bat_priv, hna_local_entry, message); | 329 | hna_local_del(bat_priv, hna_local_entry, message); |
@@ -270,27 +339,29 @@ static void hna_local_purge(struct work_struct *work) | |||
270 | container_of(delayed_work, struct bat_priv, hna_work); | 339 | container_of(delayed_work, struct bat_priv, hna_work); |
271 | struct hashtable_t *hash = bat_priv->hna_local_hash; | 340 | struct hashtable_t *hash = bat_priv->hna_local_hash; |
272 | struct hna_local_entry *hna_local_entry; | 341 | struct hna_local_entry *hna_local_entry; |
273 | int i; | 342 | struct hlist_node *node, *node_tmp; |
274 | struct hlist_node *walk, *safe; | ||
275 | struct hlist_head *head; | 343 | struct hlist_head *head; |
276 | struct element_t *bucket; | ||
277 | unsigned long timeout; | 344 | unsigned long timeout; |
345 | int i; | ||
278 | 346 | ||
279 | spin_lock_bh(&bat_priv->hna_lhash_lock); | 347 | spin_lock_bh(&bat_priv->hna_lhash_lock); |
280 | 348 | ||
281 | for (i = 0; i < hash->size; i++) { | 349 | for (i = 0; i < hash->size; i++) { |
282 | head = &hash->table[i]; | 350 | head = &hash->table[i]; |
283 | 351 | ||
284 | hlist_for_each_entry_safe(bucket, walk, safe, head, hlist) { | 352 | hlist_for_each_entry_safe(hna_local_entry, node, node_tmp, |
285 | hna_local_entry = bucket->data; | 353 | head, hash_entry) { |
354 | if (hna_local_entry->never_purge) | ||
355 | continue; | ||
286 | 356 | ||
287 | timeout = hna_local_entry->last_seen; | 357 | timeout = hna_local_entry->last_seen; |
288 | timeout += LOCAL_HNA_TIMEOUT * HZ; | 358 | timeout += LOCAL_HNA_TIMEOUT * HZ; |
289 | 359 | ||
290 | if ((!hna_local_entry->never_purge) && | 360 | if (time_before(jiffies, timeout)) |
291 | time_after(jiffies, timeout)) | 361 | continue; |
292 | hna_local_del(bat_priv, hna_local_entry, | 362 | |
293 | "address timed out"); | 363 | hna_local_del(bat_priv, hna_local_entry, |
364 | "address timed out"); | ||
294 | } | 365 | } |
295 | } | 366 | } |
296 | 367 | ||
@@ -334,9 +405,7 @@ void hna_global_add_orig(struct bat_priv *bat_priv, | |||
334 | spin_lock_bh(&bat_priv->hna_ghash_lock); | 405 | spin_lock_bh(&bat_priv->hna_ghash_lock); |
335 | 406 | ||
336 | hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN); | 407 | hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN); |
337 | hna_global_entry = (struct hna_global_entry *) | 408 | hna_global_entry = hna_global_hash_find(bat_priv, hna_ptr); |
338 | hash_find(bat_priv->hna_global_hash, compare_orig, | ||
339 | choose_orig, hna_ptr); | ||
340 | 409 | ||
341 | if (!hna_global_entry) { | 410 | if (!hna_global_entry) { |
342 | spin_unlock_bh(&bat_priv->hna_ghash_lock); | 411 | spin_unlock_bh(&bat_priv->hna_ghash_lock); |
@@ -356,8 +425,9 @@ void hna_global_add_orig(struct bat_priv *bat_priv, | |||
356 | hna_global_entry->addr, orig_node->orig); | 425 | hna_global_entry->addr, orig_node->orig); |
357 | 426 | ||
358 | spin_lock_bh(&bat_priv->hna_ghash_lock); | 427 | spin_lock_bh(&bat_priv->hna_ghash_lock); |
359 | hash_add(bat_priv->hna_global_hash, compare_orig, | 428 | hash_add(bat_priv->hna_global_hash, compare_ghna, |
360 | choose_orig, hna_global_entry); | 429 | choose_orig, hna_global_entry, |
430 | &hna_global_entry->hash_entry); | ||
361 | 431 | ||
362 | } | 432 | } |
363 | 433 | ||
@@ -368,9 +438,7 @@ void hna_global_add_orig(struct bat_priv *bat_priv, | |||
368 | spin_lock_bh(&bat_priv->hna_lhash_lock); | 438 | spin_lock_bh(&bat_priv->hna_lhash_lock); |
369 | 439 | ||
370 | hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN); | 440 | hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN); |
371 | hna_local_entry = (struct hna_local_entry *) | 441 | hna_local_entry = hna_local_hash_find(bat_priv, hna_ptr); |
372 | hash_find(bat_priv->hna_local_hash, compare_orig, | ||
373 | choose_orig, hna_ptr); | ||
374 | 442 | ||
375 | if (hna_local_entry) | 443 | if (hna_local_entry) |
376 | hna_local_del(bat_priv, hna_local_entry, | 444 | hna_local_del(bat_priv, hna_local_entry, |
@@ -400,12 +468,11 @@ int hna_global_seq_print_text(struct seq_file *seq, void *offset) | |||
400 | struct bat_priv *bat_priv = netdev_priv(net_dev); | 468 | struct bat_priv *bat_priv = netdev_priv(net_dev); |
401 | struct hashtable_t *hash = bat_priv->hna_global_hash; | 469 | struct hashtable_t *hash = bat_priv->hna_global_hash; |
402 | struct hna_global_entry *hna_global_entry; | 470 | struct hna_global_entry *hna_global_entry; |
403 | int i; | 471 | struct hlist_node *node; |
404 | struct hlist_node *walk; | ||
405 | struct hlist_head *head; | 472 | struct hlist_head *head; |
406 | struct element_t *bucket; | ||
407 | size_t buf_size, pos; | 473 | size_t buf_size, pos; |
408 | char *buff; | 474 | char *buff; |
475 | int i; | ||
409 | 476 | ||
410 | if (!bat_priv->primary_if) { | 477 | if (!bat_priv->primary_if) { |
411 | return seq_printf(seq, "BATMAN mesh %s disabled - " | 478 | return seq_printf(seq, "BATMAN mesh %s disabled - " |
@@ -423,8 +490,10 @@ int hna_global_seq_print_text(struct seq_file *seq, void *offset) | |||
423 | for (i = 0; i < hash->size; i++) { | 490 | for (i = 0; i < hash->size; i++) { |
424 | head = &hash->table[i]; | 491 | head = &hash->table[i]; |
425 | 492 | ||
426 | hlist_for_each(walk, head) | 493 | rcu_read_lock(); |
494 | __hlist_for_each_rcu(node, head) | ||
427 | buf_size += 43; | 495 | buf_size += 43; |
496 | rcu_read_unlock(); | ||
428 | } | 497 | } |
429 | 498 | ||
430 | buff = kmalloc(buf_size, GFP_ATOMIC); | 499 | buff = kmalloc(buf_size, GFP_ATOMIC); |
@@ -438,14 +507,15 @@ int hna_global_seq_print_text(struct seq_file *seq, void *offset) | |||
438 | for (i = 0; i < hash->size; i++) { | 507 | for (i = 0; i < hash->size; i++) { |
439 | head = &hash->table[i]; | 508 | head = &hash->table[i]; |
440 | 509 | ||
441 | hlist_for_each_entry(bucket, walk, head, hlist) { | 510 | rcu_read_lock(); |
442 | hna_global_entry = bucket->data; | 511 | hlist_for_each_entry_rcu(hna_global_entry, node, |
443 | 512 | head, hash_entry) { | |
444 | pos += snprintf(buff + pos, 44, | 513 | pos += snprintf(buff + pos, 44, |
445 | " * %pM via %pM\n", | 514 | " * %pM via %pM\n", |
446 | hna_global_entry->addr, | 515 | hna_global_entry->addr, |
447 | hna_global_entry->orig_node->orig); | 516 | hna_global_entry->orig_node->orig); |
448 | } | 517 | } |
518 | rcu_read_unlock(); | ||
449 | } | 519 | } |
450 | 520 | ||
451 | spin_unlock_bh(&bat_priv->hna_ghash_lock); | 521 | spin_unlock_bh(&bat_priv->hna_ghash_lock); |
@@ -464,7 +534,7 @@ static void _hna_global_del_orig(struct bat_priv *bat_priv, | |||
464 | hna_global_entry->addr, hna_global_entry->orig_node->orig, | 534 | hna_global_entry->addr, hna_global_entry->orig_node->orig, |
465 | message); | 535 | message); |
466 | 536 | ||
467 | hash_remove(bat_priv->hna_global_hash, compare_orig, choose_orig, | 537 | hash_remove(bat_priv->hna_global_hash, compare_ghna, choose_orig, |
468 | hna_global_entry->addr); | 538 | hna_global_entry->addr); |
469 | kfree(hna_global_entry); | 539 | kfree(hna_global_entry); |
470 | } | 540 | } |
@@ -483,9 +553,7 @@ void hna_global_del_orig(struct bat_priv *bat_priv, | |||
483 | 553 | ||
484 | while ((hna_buff_count + 1) * ETH_ALEN <= orig_node->hna_buff_len) { | 554 | while ((hna_buff_count + 1) * ETH_ALEN <= orig_node->hna_buff_len) { |
485 | hna_ptr = orig_node->hna_buff + (hna_buff_count * ETH_ALEN); | 555 | hna_ptr = orig_node->hna_buff + (hna_buff_count * ETH_ALEN); |
486 | hna_global_entry = (struct hna_global_entry *) | 556 | hna_global_entry = hna_global_hash_find(bat_priv, hna_ptr); |
487 | hash_find(bat_priv->hna_global_hash, compare_orig, | ||
488 | choose_orig, hna_ptr); | ||
489 | 557 | ||
490 | if ((hna_global_entry) && | 558 | if ((hna_global_entry) && |
491 | (hna_global_entry->orig_node == orig_node)) | 559 | (hna_global_entry->orig_node == orig_node)) |
@@ -502,8 +570,10 @@ void hna_global_del_orig(struct bat_priv *bat_priv, | |||
502 | orig_node->hna_buff = NULL; | 570 | orig_node->hna_buff = NULL; |
503 | } | 571 | } |
504 | 572 | ||
505 | static void hna_global_del(void *data, void *arg) | 573 | static void hna_global_del(struct hlist_node *node, void *arg) |
506 | { | 574 | { |
575 | void *data = container_of(node, struct hna_global_entry, hash_entry); | ||
576 | |||
507 | kfree(data); | 577 | kfree(data); |
508 | } | 578 | } |
509 | 579 | ||
@@ -519,15 +589,20 @@ void hna_global_free(struct bat_priv *bat_priv) | |||
519 | struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr) | 589 | struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr) |
520 | { | 590 | { |
521 | struct hna_global_entry *hna_global_entry; | 591 | struct hna_global_entry *hna_global_entry; |
592 | struct orig_node *orig_node = NULL; | ||
522 | 593 | ||
523 | spin_lock_bh(&bat_priv->hna_ghash_lock); | 594 | spin_lock_bh(&bat_priv->hna_ghash_lock); |
524 | hna_global_entry = (struct hna_global_entry *) | 595 | hna_global_entry = hna_global_hash_find(bat_priv, addr); |
525 | hash_find(bat_priv->hna_global_hash, | ||
526 | compare_orig, choose_orig, addr); | ||
527 | spin_unlock_bh(&bat_priv->hna_ghash_lock); | ||
528 | 596 | ||
529 | if (!hna_global_entry) | 597 | if (!hna_global_entry) |
530 | return NULL; | 598 | goto out; |
531 | 599 | ||
532 | return hna_global_entry->orig_node; | 600 | if (!atomic_inc_not_zero(&hna_global_entry->orig_node->refcount)) |
601 | goto out; | ||
602 | |||
603 | orig_node = hna_global_entry->orig_node; | ||
604 | |||
605 | out: | ||
606 | spin_unlock_bh(&bat_priv->hna_ghash_lock); | ||
607 | return orig_node; | ||
533 | } | 608 | } |
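
The translation-table hunks above replace open-coded hash_find() calls with typed lookup helpers (hna_local_hash_find, hna_global_hash_find, orig_hash_find) whose bodies are not part of this section. A minimal sketch of what such a helper looks like, assuming the hash_entry members added in types.h below, the choose_orig() hash function already used by hash_remove(), and the compare_eth() helper this series introduces; callers in this file still hold hna_ghash_lock across the lookup and the use of the result:

/* Sketch only -- the real helper is defined elsewhere in the series.
 * Walks the RCU-protected bucket that choose_orig() maps the address
 * to and returns the entry whose MAC matches, or NULL. */
static struct hna_global_entry *hna_global_hash_find(struct bat_priv *bat_priv,
						     void *data)
{
	struct hashtable_t *hash = bat_priv->hna_global_hash;
	struct hna_global_entry *entry, *entry_tmp = NULL;
	struct hlist_head *head;
	struct hlist_node *node;
	int index;

	if (!hash)
		return NULL;

	index = choose_orig(data, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(entry, node, head, hash_entry) {
		if (!compare_eth(entry->addr, data))
			continue;

		entry_tmp = entry;
		break;
	}
	rcu_read_unlock();

	return entry_tmp;
}

The same shape appears verbatim as vis_hash_find() in the vis.c hunks further down, with vis_info_choose() and vis_info_cmp() in place of choose_orig() and compare_eth().
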
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index 7270405046e9..83445cf0cc9f 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h | |||
@@ -33,7 +33,7 @@ | |||
33 | sizeof(struct bcast_packet)))) | 33 | sizeof(struct bcast_packet)))) |
34 | 34 | ||
35 | 35 | ||
36 | struct batman_if { | 36 | struct hard_iface { |
37 | struct list_head list; | 37 | struct list_head list; |
38 | int16_t if_num; | 38 | int16_t if_num; |
39 | char if_status; | 39 | char if_status; |
@@ -43,7 +43,7 @@ struct batman_if { | |||
43 | unsigned char *packet_buff; | 43 | unsigned char *packet_buff; |
44 | int packet_len; | 44 | int packet_len; |
45 | struct kobject *hardif_obj; | 45 | struct kobject *hardif_obj; |
46 | struct kref refcount; | 46 | atomic_t refcount; |
47 | struct packet_type batman_adv_ptype; | 47 | struct packet_type batman_adv_ptype; |
48 | struct net_device *soft_iface; | 48 | struct net_device *soft_iface; |
49 | struct rcu_head rcu; | 49 | struct rcu_head rcu; |
@@ -70,8 +70,6 @@ struct orig_node { | |||
70 | struct neigh_node *router; | 70 | struct neigh_node *router; |
71 | unsigned long *bcast_own; | 71 | unsigned long *bcast_own; |
72 | uint8_t *bcast_own_sum; | 72 | uint8_t *bcast_own_sum; |
73 | uint8_t tq_own; | ||
74 | int tq_asym_penalty; | ||
75 | unsigned long last_valid; | 73 | unsigned long last_valid; |
76 | unsigned long bcast_seqno_reset; | 74 | unsigned long bcast_seqno_reset; |
77 | unsigned long batman_seqno_reset; | 75 | unsigned long batman_seqno_reset; |
@@ -83,20 +81,28 @@ struct orig_node { | |||
83 | uint8_t last_ttl; | 81 | uint8_t last_ttl; |
84 | unsigned long bcast_bits[NUM_WORDS]; | 82 | unsigned long bcast_bits[NUM_WORDS]; |
85 | uint32_t last_bcast_seqno; | 83 | uint32_t last_bcast_seqno; |
86 | struct list_head neigh_list; | 84 | struct hlist_head neigh_list; |
87 | struct list_head frag_list; | 85 | struct list_head frag_list; |
86 | spinlock_t neigh_list_lock; /* protects neighbor list */ | ||
87 | atomic_t refcount; | ||
88 | struct rcu_head rcu; | ||
89 | struct hlist_node hash_entry; | ||
90 | struct bat_priv *bat_priv; | ||
88 | unsigned long last_frag_packet; | 91 | unsigned long last_frag_packet; |
89 | struct { | 92 | spinlock_t ogm_cnt_lock; /* protects: bcast_own, bcast_own_sum, |
90 | uint8_t candidates; | 93 | * neigh_node->real_bits, |
91 | struct neigh_node *selected; | 94 | * neigh_node->real_packet_count */ |
92 | } bond; | 95 | spinlock_t bcast_seqno_lock; /* protects bcast_bits, |
96 | * last_bcast_seqno */ | ||
97 | atomic_t bond_candidates; | ||
98 | struct list_head bond_list; | ||
93 | }; | 99 | }; |
94 | 100 | ||
95 | struct gw_node { | 101 | struct gw_node { |
96 | struct hlist_node list; | 102 | struct hlist_node list; |
97 | struct orig_node *orig_node; | 103 | struct orig_node *orig_node; |
98 | unsigned long deleted; | 104 | unsigned long deleted; |
99 | struct kref refcount; | 105 | atomic_t refcount; |
100 | struct rcu_head rcu; | 106 | struct rcu_head rcu; |
101 | }; | 107 | }; |
102 | 108 | ||
@@ -105,18 +111,20 @@ struct gw_node { | |||
105 | * @last_valid: when last packet via this neighbor was received | 111 | * @last_valid: when last packet via this neighbor was received |
106 | */ | 112 | */ |
107 | struct neigh_node { | 113 | struct neigh_node { |
108 | struct list_head list; | 114 | struct hlist_node list; |
109 | uint8_t addr[ETH_ALEN]; | 115 | uint8_t addr[ETH_ALEN]; |
110 | uint8_t real_packet_count; | 116 | uint8_t real_packet_count; |
111 | uint8_t tq_recv[TQ_GLOBAL_WINDOW_SIZE]; | 117 | uint8_t tq_recv[TQ_GLOBAL_WINDOW_SIZE]; |
112 | uint8_t tq_index; | 118 | uint8_t tq_index; |
113 | uint8_t tq_avg; | 119 | uint8_t tq_avg; |
114 | uint8_t last_ttl; | 120 | uint8_t last_ttl; |
115 | struct neigh_node *next_bond_candidate; | 121 | struct list_head bonding_list; |
116 | unsigned long last_valid; | 122 | unsigned long last_valid; |
117 | unsigned long real_bits[NUM_WORDS]; | 123 | unsigned long real_bits[NUM_WORDS]; |
124 | atomic_t refcount; | ||
125 | struct rcu_head rcu; | ||
118 | struct orig_node *orig_node; | 126 | struct orig_node *orig_node; |
119 | struct batman_if *if_incoming; | 127 | struct hard_iface *if_incoming; |
120 | }; | 128 | }; |
121 | 129 | ||
122 | 130 | ||
@@ -140,7 +148,7 @@ struct bat_priv { | |||
140 | struct hlist_head softif_neigh_list; | 148 | struct hlist_head softif_neigh_list; |
141 | struct softif_neigh *softif_neigh; | 149 | struct softif_neigh *softif_neigh; |
142 | struct debug_log *debug_log; | 150 | struct debug_log *debug_log; |
143 | struct batman_if *primary_if; | 151 | struct hard_iface *primary_if; |
144 | struct kobject *mesh_obj; | 152 | struct kobject *mesh_obj; |
145 | struct dentry *debug_dir; | 153 | struct dentry *debug_dir; |
146 | struct hlist_head forw_bat_list; | 154 | struct hlist_head forw_bat_list; |
@@ -151,12 +159,11 @@ struct bat_priv { | |||
151 | struct hashtable_t *hna_local_hash; | 159 | struct hashtable_t *hna_local_hash; |
152 | struct hashtable_t *hna_global_hash; | 160 | struct hashtable_t *hna_global_hash; |
153 | struct hashtable_t *vis_hash; | 161 | struct hashtable_t *vis_hash; |
154 | spinlock_t orig_hash_lock; /* protects orig_hash */ | ||
155 | spinlock_t forw_bat_list_lock; /* protects forw_bat_list */ | 162 | spinlock_t forw_bat_list_lock; /* protects forw_bat_list */ |
156 | spinlock_t forw_bcast_list_lock; /* protects */ | 163 | spinlock_t forw_bcast_list_lock; /* protects */ |
157 | spinlock_t hna_lhash_lock; /* protects hna_local_hash */ | 164 | spinlock_t hna_lhash_lock; /* protects hna_local_hash */ |
158 | spinlock_t hna_ghash_lock; /* protects hna_global_hash */ | 165 | spinlock_t hna_ghash_lock; /* protects hna_global_hash */ |
159 | spinlock_t gw_list_lock; /* protects gw_list */ | 166 | spinlock_t gw_list_lock; /* protects gw_list and curr_gw */ |
160 | spinlock_t vis_hash_lock; /* protects vis_hash */ | 167 | spinlock_t vis_hash_lock; /* protects vis_hash */ |
161 | spinlock_t vis_list_lock; /* protects vis_info::recv_list */ | 168 | spinlock_t vis_list_lock; /* protects vis_info::recv_list */ |
162 | spinlock_t softif_neigh_lock; /* protects soft-interface neigh list */ | 169 | spinlock_t softif_neigh_lock; /* protects soft-interface neigh list */ |
@@ -165,7 +172,7 @@ struct bat_priv { | |||
165 | struct delayed_work hna_work; | 172 | struct delayed_work hna_work; |
166 | struct delayed_work orig_work; | 173 | struct delayed_work orig_work; |
167 | struct delayed_work vis_work; | 174 | struct delayed_work vis_work; |
168 | struct gw_node *curr_gw; | 175 | struct gw_node __rcu *curr_gw; /* rcu protected pointer */ |
169 | struct vis_info *my_vis_info; | 176 | struct vis_info *my_vis_info; |
170 | }; | 177 | }; |
171 | 178 | ||
@@ -188,11 +195,13 @@ struct hna_local_entry { | |||
188 | uint8_t addr[ETH_ALEN]; | 195 | uint8_t addr[ETH_ALEN]; |
189 | unsigned long last_seen; | 196 | unsigned long last_seen; |
190 | char never_purge; | 197 | char never_purge; |
198 | struct hlist_node hash_entry; | ||
191 | }; | 199 | }; |
192 | 200 | ||
193 | struct hna_global_entry { | 201 | struct hna_global_entry { |
194 | uint8_t addr[ETH_ALEN]; | 202 | uint8_t addr[ETH_ALEN]; |
195 | struct orig_node *orig_node; | 203 | struct orig_node *orig_node; |
204 | struct hlist_node hash_entry; | ||
196 | }; | 205 | }; |
197 | 206 | ||
198 | /** | 207 | /** |
@@ -208,7 +217,7 @@ struct forw_packet { | |||
208 | uint32_t direct_link_flags; | 217 | uint32_t direct_link_flags; |
209 | uint8_t num_packets; | 218 | uint8_t num_packets; |
210 | struct delayed_work delayed_work; | 219 | struct delayed_work delayed_work; |
211 | struct batman_if *if_incoming; | 220 | struct hard_iface *if_incoming; |
212 | }; | 221 | }; |
213 | 222 | ||
214 | /* While scanning for vis-entries of a particular vis-originator | 223 | /* While scanning for vis-entries of a particular vis-originator |
@@ -242,6 +251,7 @@ struct vis_info { | |||
242 | * from. we should not reply to them. */ | 251 | * from. we should not reply to them. */ |
243 | struct list_head send_list; | 252 | struct list_head send_list; |
244 | struct kref refcount; | 253 | struct kref refcount; |
254 | struct hlist_node hash_entry; | ||
245 | struct bat_priv *bat_priv; | 255 | struct bat_priv *bat_priv; |
246 | /* this packet might be part of the vis send queue. */ | 256 | /* this packet might be part of the vis send queue. */ |
247 | struct sk_buff *skb_packet; | 257 | struct sk_buff *skb_packet; |
@@ -264,7 +274,7 @@ struct softif_neigh { | |||
264 | uint8_t addr[ETH_ALEN]; | 274 | uint8_t addr[ETH_ALEN]; |
265 | unsigned long last_seen; | 275 | unsigned long last_seen; |
266 | short vid; | 276 | short vid; |
267 | struct kref refcount; | 277 | atomic_t refcount; |
268 | struct rcu_head rcu; | 278 | struct rcu_head rcu; |
269 | }; | 279 | }; |
270 | 280 | ||
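
types.h swaps struct kref for a bare atomic_t refcount on hard_iface, gw_node, neigh_node and softif_neigh, and gives orig_node a refcount plus an rcu head of its own. The matching free_ref helpers referenced by the .c hunks (neigh_node_free_ref(), orig_node_free_ref()) live in originator.c, outside this section; a plausible sketch of the pattern, assuming the object is freed via call_rcu() so concurrent RCU readers can still dereference it until a grace period has elapsed:

/* Sketch of the atomic_t + RCU release pattern this series moves to. */
static void neigh_node_free_rcu(struct rcu_head *rcu)
{
	struct neigh_node *neigh_node;

	neigh_node = container_of(rcu, struct neigh_node, rcu);
	kfree(neigh_node);
}

void neigh_node_free_ref(struct neigh_node *neigh_node)
{
	/* last reference gone: defer the kfree() past a grace period */
	if (atomic_dec_and_test(&neigh_node->refcount))
		call_rcu(&neigh_node->rcu, neigh_node_free_rcu);
}

The flip side of the pattern: a reader inside rcu_read_lock() may only keep the object beyond the read-side section if atomic_inc_not_zero(&obj->refcount) succeeds, since a count of zero means the last reference is already gone and the object is queued for freeing. That is exactly the check transtable_search() performs above and unicast_vis_packet() performs below.
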
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c index 121b11d2a23d..19f84bd443af 100644 --- a/net/batman-adv/unicast.c +++ b/net/batman-adv/unicast.c | |||
@@ -183,15 +183,10 @@ int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv, | |||
183 | (struct unicast_frag_packet *)skb->data; | 183 | (struct unicast_frag_packet *)skb->data; |
184 | 184 | ||
185 | *new_skb = NULL; | 185 | *new_skb = NULL; |
186 | spin_lock_bh(&bat_priv->orig_hash_lock); | ||
187 | orig_node = ((struct orig_node *) | ||
188 | hash_find(bat_priv->orig_hash, compare_orig, choose_orig, | ||
189 | unicast_packet->orig)); | ||
190 | 186 | ||
191 | if (!orig_node) { | 187 | orig_node = orig_hash_find(bat_priv, unicast_packet->orig); |
192 | pr_debug("couldn't find originator in orig_hash\n"); | 188 | if (!orig_node) |
193 | goto out; | 189 | goto out; |
194 | } | ||
195 | 190 | ||
196 | orig_node->last_frag_packet = jiffies; | 191 | orig_node->last_frag_packet = jiffies; |
197 | 192 | ||
@@ -215,14 +210,15 @@ int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv, | |||
215 | /* if not, merge failed */ | 210 | /* if not, merge failed */ |
216 | if (*new_skb) | 211 | if (*new_skb) |
217 | ret = NET_RX_SUCCESS; | 212 | ret = NET_RX_SUCCESS; |
218 | out: | ||
219 | spin_unlock_bh(&bat_priv->orig_hash_lock); | ||
220 | 213 | ||
214 | out: | ||
215 | if (orig_node) | ||
216 | orig_node_free_ref(orig_node); | ||
221 | return ret; | 217 | return ret; |
222 | } | 218 | } |
223 | 219 | ||
224 | int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv, | 220 | int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv, |
225 | struct batman_if *batman_if, uint8_t dstaddr[]) | 221 | struct hard_iface *hard_iface, uint8_t dstaddr[]) |
226 | { | 222 | { |
227 | struct unicast_packet tmp_uc, *unicast_packet; | 223 | struct unicast_packet tmp_uc, *unicast_packet; |
228 | struct sk_buff *frag_skb; | 224 | struct sk_buff *frag_skb; |
@@ -267,12 +263,12 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv, | |||
267 | frag1->flags = UNI_FRAG_HEAD | large_tail; | 263 | frag1->flags = UNI_FRAG_HEAD | large_tail; |
268 | frag2->flags = large_tail; | 264 | frag2->flags = large_tail; |
269 | 265 | ||
270 | seqno = atomic_add_return(2, &batman_if->frag_seqno); | 266 | seqno = atomic_add_return(2, &hard_iface->frag_seqno); |
271 | frag1->seqno = htons(seqno - 1); | 267 | frag1->seqno = htons(seqno - 1); |
272 | frag2->seqno = htons(seqno); | 268 | frag2->seqno = htons(seqno); |
273 | 269 | ||
274 | send_skb_packet(skb, batman_if, dstaddr); | 270 | send_skb_packet(skb, hard_iface, dstaddr); |
275 | send_skb_packet(frag_skb, batman_if, dstaddr); | 271 | send_skb_packet(frag_skb, hard_iface, dstaddr); |
276 | return NET_RX_SUCCESS; | 272 | return NET_RX_SUCCESS; |
277 | 273 | ||
278 | drop_frag: | 274 | drop_frag: |
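
The fragmentation hunk keeps the existing trick for numbering the two fragments: atomic_add_return(2, &hard_iface->frag_seqno) reserves both sequence numbers in one atomic step and returns the post-add value, so the pair (seqno - 1, seqno) stays consecutive even when several CPUs fragment packets over the same interface concurrently. A standalone C11 illustration of the same reservation scheme, with hypothetical names:

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint16_t frag_seqno;	/* stands in for the per-interface counter */

/* Each caller atomically advances the counter by 2 and owns the two
 * values ending at the post-add position, so concurrent callers can
 * never interleave their fragment pairs. */
static void reserve_frag_pair(uint16_t *first, uint16_t *second)
{
	/* atomic_fetch_add() returns the value before the add; adding 2
	 * reproduces what the kernel's atomic_add_return() yields. */
	uint16_t after = (uint16_t)(atomic_fetch_add(&frag_seqno, 2) + 2);

	*first = (uint16_t)(after - 1);	/* becomes frag1->seqno */
	*second = after;		/* becomes frag2->seqno */
}
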
@@ -286,40 +282,37 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv) | |||
286 | { | 282 | { |
287 | struct ethhdr *ethhdr = (struct ethhdr *)skb->data; | 283 | struct ethhdr *ethhdr = (struct ethhdr *)skb->data; |
288 | struct unicast_packet *unicast_packet; | 284 | struct unicast_packet *unicast_packet; |
289 | struct orig_node *orig_node = NULL; | 285 | struct orig_node *orig_node; |
290 | struct batman_if *batman_if; | 286 | struct neigh_node *neigh_node; |
291 | struct neigh_node *router; | ||
292 | int data_len = skb->len; | 287 | int data_len = skb->len; |
293 | uint8_t dstaddr[6]; | 288 | int ret = 1; |
294 | |||
295 | spin_lock_bh(&bat_priv->orig_hash_lock); | ||
296 | 289 | ||
297 | /* get routing information */ | 290 | /* get routing information */ |
298 | if (is_multicast_ether_addr(ethhdr->h_dest)) | 291 | if (is_multicast_ether_addr(ethhdr->h_dest)) { |
299 | orig_node = (struct orig_node *)gw_get_selected(bat_priv); | 292 | orig_node = (struct orig_node *)gw_get_selected(bat_priv); |
293 | if (orig_node) | ||
294 | goto find_router; | ||
295 | } | ||
300 | 296 | ||
301 | /* check for hna host */ | 297 | /* check for hna host - increases orig_node refcount */ |
302 | if (!orig_node) | 298 | orig_node = transtable_search(bat_priv, ethhdr->h_dest); |
303 | orig_node = transtable_search(bat_priv, ethhdr->h_dest); | ||
304 | |||
305 | router = find_router(bat_priv, orig_node, NULL); | ||
306 | |||
307 | if (!router) | ||
308 | goto unlock; | ||
309 | |||
310 | /* don't lock while sending the packets ... we therefore | ||
311 | * copy the required data before sending */ | ||
312 | 299 | ||
313 | batman_if = router->if_incoming; | 300 | find_router: |
314 | memcpy(dstaddr, router->addr, ETH_ALEN); | 301 | /** |
302 | * find_router(): | ||
303 | * - if orig_node is NULL it returns NULL | ||
304 | * - increases neigh_nodes refcount if found. | ||
305 | */ | ||
306 | neigh_node = find_router(bat_priv, orig_node, NULL); | ||
315 | 307 | ||
316 | spin_unlock_bh(&bat_priv->orig_hash_lock); | 308 | if (!neigh_node) |
309 | goto out; | ||
317 | 310 | ||
318 | if (batman_if->if_status != IF_ACTIVE) | 311 | if (neigh_node->if_incoming->if_status != IF_ACTIVE) |
319 | goto dropped; | 312 | goto out; |
320 | 313 | ||
321 | if (my_skb_head_push(skb, sizeof(struct unicast_packet)) < 0) | 314 | if (my_skb_head_push(skb, sizeof(struct unicast_packet)) < 0) |
322 | goto dropped; | 315 | goto out; |
323 | 316 | ||
324 | unicast_packet = (struct unicast_packet *)skb->data; | 317 | unicast_packet = (struct unicast_packet *)skb->data; |
325 | 318 | ||
@@ -333,18 +326,24 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv) | |||
333 | 326 | ||
334 | if (atomic_read(&bat_priv->fragmentation) && | 327 | if (atomic_read(&bat_priv->fragmentation) && |
335 | data_len + sizeof(struct unicast_packet) > | 328 | data_len + sizeof(struct unicast_packet) > |
336 | batman_if->net_dev->mtu) { | 329 | neigh_node->if_incoming->net_dev->mtu) { |
337 | /* send frag skb decreases ttl */ | 330 | /* send frag skb decreases ttl */ |
338 | unicast_packet->ttl++; | 331 | unicast_packet->ttl++; |
339 | return frag_send_skb(skb, bat_priv, batman_if, | 332 | ret = frag_send_skb(skb, bat_priv, |
340 | dstaddr); | 333 | neigh_node->if_incoming, neigh_node->addr); |
334 | goto out; | ||
341 | } | 335 | } |
342 | send_skb_packet(skb, batman_if, dstaddr); | ||
343 | return 0; | ||
344 | 336 | ||
345 | unlock: | 337 | send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); |
346 | spin_unlock_bh(&bat_priv->orig_hash_lock); | 338 | ret = 0; |
347 | dropped: | 339 | goto out; |
348 | kfree_skb(skb); | 340 | |
349 | return 1; | 341 | out: |
342 | if (neigh_node) | ||
343 | neigh_node_free_ref(neigh_node); | ||
344 | if (orig_node) | ||
345 | orig_node_free_ref(orig_node); | ||
346 | if (ret == 1) | ||
347 | kfree_skb(skb); | ||
348 | return ret; | ||
350 | } | 349 | } |
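
unicast_send_skb() above obtains its gateway from gw_get_selected(), which is defined in gateway_client.c and not shown in this section, while types.h now annotates bat_priv->curr_gw as an __rcu pointer updated under gw_list_lock. Since the caller releases the result through orig_node_free_ref(), the accessor must hand back a counted reference; a hedged sketch of what such an accessor typically looks like under those assumptions:

/* Sketch only: the real gw_get_selected() lives in gateway_client.c.
 * Dereferences the __rcu curr_gw pointer inside a read-side section and
 * escapes it with the inc-not-zero convention used throughout the diff. */
static struct orig_node *gw_get_selected_sketch(struct bat_priv *bat_priv)
{
	struct gw_node *curr_gw;
	struct orig_node *orig_node = NULL;

	rcu_read_lock();
	curr_gw = rcu_dereference(bat_priv->curr_gw);
	if (!curr_gw)
		goto out;

	orig_node = curr_gw->orig_node;
	if (!atomic_inc_not_zero(&orig_node->refcount))
		orig_node = NULL;

out:
	rcu_read_unlock();
	return orig_node;
}
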
diff --git a/net/batman-adv/unicast.h b/net/batman-adv/unicast.h index 8897308281d4..16ad7a9242b5 100644 --- a/net/batman-adv/unicast.h +++ b/net/batman-adv/unicast.h | |||
@@ -32,7 +32,7 @@ int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv, | |||
32 | void frag_list_free(struct list_head *head); | 32 | void frag_list_free(struct list_head *head); |
33 | int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv); | 33 | int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv); |
34 | int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv, | 34 | int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv, |
35 | struct batman_if *batman_if, uint8_t dstaddr[]); | 35 | struct hard_iface *hard_iface, uint8_t dstaddr[]); |
36 | 36 | ||
37 | static inline int frag_can_reassemble(struct sk_buff *skb, int mtu) | 37 | static inline int frag_can_reassemble(struct sk_buff *skb, int mtu) |
38 | { | 38 | { |
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c index 7db9ad82cc00..f90212f42082 100644 --- a/net/batman-adv/vis.c +++ b/net/batman-adv/vis.c | |||
@@ -68,15 +68,16 @@ static void free_info(struct kref *ref) | |||
68 | } | 68 | } |
69 | 69 | ||
70 | /* Compare two vis packets, used by the hashing algorithm */ | 70 | /* Compare two vis packets, used by the hashing algorithm */ |
71 | static int vis_info_cmp(void *data1, void *data2) | 71 | static int vis_info_cmp(struct hlist_node *node, void *data2) |
72 | { | 72 | { |
73 | struct vis_info *d1, *d2; | 73 | struct vis_info *d1, *d2; |
74 | struct vis_packet *p1, *p2; | 74 | struct vis_packet *p1, *p2; |
75 | d1 = data1; | 75 | |
76 | d1 = container_of(node, struct vis_info, hash_entry); | ||
76 | d2 = data2; | 77 | d2 = data2; |
77 | p1 = (struct vis_packet *)d1->skb_packet->data; | 78 | p1 = (struct vis_packet *)d1->skb_packet->data; |
78 | p2 = (struct vis_packet *)d2->skb_packet->data; | 79 | p2 = (struct vis_packet *)d2->skb_packet->data; |
79 | return compare_orig(p1->vis_orig, p2->vis_orig); | 80 | return compare_eth(p1->vis_orig, p2->vis_orig); |
80 | } | 81 | } |
81 | 82 | ||
82 | /* hash function to choose an entry in a hash table of given size */ | 83 | /* hash function to choose an entry in a hash table of given size */ |
@@ -104,6 +105,34 @@ static int vis_info_choose(void *data, int size) | |||
104 | return hash % size; | 105 | return hash % size; |
105 | } | 106 | } |
106 | 107 | ||
108 | static struct vis_info *vis_hash_find(struct bat_priv *bat_priv, | ||
109 | void *data) | ||
110 | { | ||
111 | struct hashtable_t *hash = bat_priv->vis_hash; | ||
112 | struct hlist_head *head; | ||
113 | struct hlist_node *node; | ||
114 | struct vis_info *vis_info, *vis_info_tmp = NULL; | ||
115 | int index; | ||
116 | |||
117 | if (!hash) | ||
118 | return NULL; | ||
119 | |||
120 | index = vis_info_choose(data, hash->size); | ||
121 | head = &hash->table[index]; | ||
122 | |||
123 | rcu_read_lock(); | ||
124 | hlist_for_each_entry_rcu(vis_info, node, head, hash_entry) { | ||
125 | if (!vis_info_cmp(node, data)) | ||
126 | continue; | ||
127 | |||
128 | vis_info_tmp = vis_info; | ||
129 | break; | ||
130 | } | ||
131 | rcu_read_unlock(); | ||
132 | |||
133 | return vis_info_tmp; | ||
134 | } | ||
135 | |||
107 | /* insert interface to the list of interfaces of one originator, if it | 136 | /* insert interface to the list of interfaces of one originator, if it |
108 | * does not already exist in the list */ | 137 | * does not already exist in the list */ |
109 | static void vis_data_insert_interface(const uint8_t *interface, | 138 | static void vis_data_insert_interface(const uint8_t *interface, |
@@ -114,7 +143,7 @@ static void vis_data_insert_interface(const uint8_t *interface, | |||
114 | struct hlist_node *pos; | 143 | struct hlist_node *pos; |
115 | 144 | ||
116 | hlist_for_each_entry(entry, pos, if_list, list) { | 145 | hlist_for_each_entry(entry, pos, if_list, list) { |
117 | if (compare_orig(entry->addr, (void *)interface)) | 146 | if (compare_eth(entry->addr, (void *)interface)) |
118 | return; | 147 | return; |
119 | } | 148 | } |
120 | 149 | ||
@@ -166,7 +195,7 @@ static ssize_t vis_data_read_entry(char *buff, struct vis_info_entry *entry, | |||
166 | /* maximal length: max(4+17+2, 3+17+1+3+2) == 26 */ | 195 | /* maximal length: max(4+17+2, 3+17+1+3+2) == 26 */ |
167 | if (primary && entry->quality == 0) | 196 | if (primary && entry->quality == 0) |
168 | return sprintf(buff, "HNA %pM, ", entry->dest); | 197 | return sprintf(buff, "HNA %pM, ", entry->dest); |
169 | else if (compare_orig(entry->src, src)) | 198 | else if (compare_eth(entry->src, src)) |
170 | return sprintf(buff, "TQ %pM %d, ", entry->dest, | 199 | return sprintf(buff, "TQ %pM %d, ", entry->dest, |
171 | entry->quality); | 200 | entry->quality); |
172 | 201 | ||
@@ -175,9 +204,8 @@ static ssize_t vis_data_read_entry(char *buff, struct vis_info_entry *entry, | |||
175 | 204 | ||
176 | int vis_seq_print_text(struct seq_file *seq, void *offset) | 205 | int vis_seq_print_text(struct seq_file *seq, void *offset) |
177 | { | 206 | { |
178 | struct hlist_node *walk; | 207 | struct hlist_node *node; |
179 | struct hlist_head *head; | 208 | struct hlist_head *head; |
180 | struct element_t *bucket; | ||
181 | struct vis_info *info; | 209 | struct vis_info *info; |
182 | struct vis_packet *packet; | 210 | struct vis_packet *packet; |
183 | struct vis_info_entry *entries; | 211 | struct vis_info_entry *entries; |
@@ -203,8 +231,8 @@ int vis_seq_print_text(struct seq_file *seq, void *offset) | |||
203 | for (i = 0; i < hash->size; i++) { | 231 | for (i = 0; i < hash->size; i++) { |
204 | head = &hash->table[i]; | 232 | head = &hash->table[i]; |
205 | 233 | ||
206 | hlist_for_each_entry(bucket, walk, head, hlist) { | 234 | rcu_read_lock(); |
207 | info = bucket->data; | 235 | hlist_for_each_entry_rcu(info, node, head, hash_entry) { |
208 | packet = (struct vis_packet *)info->skb_packet->data; | 236 | packet = (struct vis_packet *)info->skb_packet->data; |
209 | entries = (struct vis_info_entry *) | 237 | entries = (struct vis_info_entry *) |
210 | ((char *)packet + sizeof(struct vis_packet)); | 238 | ((char *)packet + sizeof(struct vis_packet)); |
@@ -213,7 +241,7 @@ int vis_seq_print_text(struct seq_file *seq, void *offset) | |||
213 | if (entries[j].quality == 0) | 241 | if (entries[j].quality == 0) |
214 | continue; | 242 | continue; |
215 | compare = | 243 | compare = |
216 | compare_orig(entries[j].src, packet->vis_orig); | 244 | compare_eth(entries[j].src, packet->vis_orig); |
217 | vis_data_insert_interface(entries[j].src, | 245 | vis_data_insert_interface(entries[j].src, |
218 | &vis_if_list, | 246 | &vis_if_list, |
219 | compare); | 247 | compare); |
@@ -223,7 +251,7 @@ int vis_seq_print_text(struct seq_file *seq, void *offset) | |||
223 | buf_size += 18 + 26 * packet->entries; | 251 | buf_size += 18 + 26 * packet->entries; |
224 | 252 | ||
225 | /* add primary/secondary records */ | 253 | /* add primary/secondary records */ |
226 | if (compare_orig(entry->addr, packet->vis_orig)) | 254 | if (compare_eth(entry->addr, packet->vis_orig)) |
227 | buf_size += | 255 | buf_size += |
228 | vis_data_count_prim_sec(&vis_if_list); | 256 | vis_data_count_prim_sec(&vis_if_list); |
229 | 257 | ||
@@ -236,6 +264,7 @@ int vis_seq_print_text(struct seq_file *seq, void *offset) | |||
236 | kfree(entry); | 264 | kfree(entry); |
237 | } | 265 | } |
238 | } | 266 | } |
267 | rcu_read_unlock(); | ||
239 | } | 268 | } |
240 | 269 | ||
241 | buff = kmalloc(buf_size, GFP_ATOMIC); | 270 | buff = kmalloc(buf_size, GFP_ATOMIC); |
@@ -249,8 +278,8 @@ int vis_seq_print_text(struct seq_file *seq, void *offset) | |||
249 | for (i = 0; i < hash->size; i++) { | 278 | for (i = 0; i < hash->size; i++) { |
250 | head = &hash->table[i]; | 279 | head = &hash->table[i]; |
251 | 280 | ||
252 | hlist_for_each_entry(bucket, walk, head, hlist) { | 281 | rcu_read_lock(); |
253 | info = bucket->data; | 282 | hlist_for_each_entry_rcu(info, node, head, hash_entry) { |
254 | packet = (struct vis_packet *)info->skb_packet->data; | 283 | packet = (struct vis_packet *)info->skb_packet->data; |
255 | entries = (struct vis_info_entry *) | 284 | entries = (struct vis_info_entry *) |
256 | ((char *)packet + sizeof(struct vis_packet)); | 285 | ((char *)packet + sizeof(struct vis_packet)); |
@@ -259,7 +288,7 @@ int vis_seq_print_text(struct seq_file *seq, void *offset) | |||
259 | if (entries[j].quality == 0) | 288 | if (entries[j].quality == 0) |
260 | continue; | 289 | continue; |
261 | compare = | 290 | compare = |
262 | compare_orig(entries[j].src, packet->vis_orig); | 291 | compare_eth(entries[j].src, packet->vis_orig); |
263 | vis_data_insert_interface(entries[j].src, | 292 | vis_data_insert_interface(entries[j].src, |
264 | &vis_if_list, | 293 | &vis_if_list, |
265 | compare); | 294 | compare); |
@@ -277,7 +306,7 @@ int vis_seq_print_text(struct seq_file *seq, void *offset) | |||
277 | entry->primary); | 306 | entry->primary); |
278 | 307 | ||
279 | /* add primary/secondary records */ | 308 | /* add primary/secondary records */ |
280 | if (compare_orig(entry->addr, packet->vis_orig)) | 309 | if (compare_eth(entry->addr, packet->vis_orig)) |
281 | buff_pos += | 310 | buff_pos += |
282 | vis_data_read_prim_sec(buff + buff_pos, | 311 | vis_data_read_prim_sec(buff + buff_pos, |
283 | &vis_if_list); | 312 | &vis_if_list); |
@@ -291,6 +320,7 @@ int vis_seq_print_text(struct seq_file *seq, void *offset) | |||
291 | kfree(entry); | 320 | kfree(entry); |
292 | } | 321 | } |
293 | } | 322 | } |
323 | rcu_read_unlock(); | ||
294 | } | 324 | } |
295 | 325 | ||
296 | spin_unlock_bh(&bat_priv->vis_hash_lock); | 326 | spin_unlock_bh(&bat_priv->vis_hash_lock); |
@@ -345,7 +375,7 @@ static int recv_list_is_in(struct bat_priv *bat_priv, | |||
345 | 375 | ||
346 | spin_lock_bh(&bat_priv->vis_list_lock); | 376 | spin_lock_bh(&bat_priv->vis_list_lock); |
347 | list_for_each_entry(entry, recv_list, list) { | 377 | list_for_each_entry(entry, recv_list, list) { |
348 | if (memcmp(entry->mac, mac, ETH_ALEN) == 0) { | 378 | if (compare_eth(entry->mac, mac)) { |
349 | spin_unlock_bh(&bat_priv->vis_list_lock); | 379 | spin_unlock_bh(&bat_priv->vis_list_lock); |
350 | return 1; | 380 | return 1; |
351 | } | 381 | } |
@@ -381,8 +411,7 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv, | |||
381 | sizeof(struct vis_packet)); | 411 | sizeof(struct vis_packet)); |
382 | 412 | ||
383 | memcpy(search_packet->vis_orig, vis_packet->vis_orig, ETH_ALEN); | 413 | memcpy(search_packet->vis_orig, vis_packet->vis_orig, ETH_ALEN); |
384 | old_info = hash_find(bat_priv->vis_hash, vis_info_cmp, vis_info_choose, | 414 | old_info = vis_hash_find(bat_priv, &search_elem); |
385 | &search_elem); | ||
386 | kfree_skb(search_elem.skb_packet); | 415 | kfree_skb(search_elem.skb_packet); |
387 | 416 | ||
388 | if (old_info) { | 417 | if (old_info) { |
@@ -442,7 +471,7 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv, | |||
442 | 471 | ||
443 | /* try to add it */ | 472 | /* try to add it */ |
444 | hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose, | 473 | hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose, |
445 | info); | 474 | info, &info->hash_entry); |
446 | if (hash_added < 0) { | 475 | if (hash_added < 0) { |
447 | /* did not work (for some reason) */ | 476 | /* did not work (for some reason) */ |
448 | kref_put(&info->refcount, free_info); | 477 | kref_put(&info->refcount, free_info); |
@@ -529,9 +558,8 @@ static int find_best_vis_server(struct bat_priv *bat_priv, | |||
529 | struct vis_info *info) | 558 | struct vis_info *info) |
530 | { | 559 | { |
531 | struct hashtable_t *hash = bat_priv->orig_hash; | 560 | struct hashtable_t *hash = bat_priv->orig_hash; |
532 | struct hlist_node *walk; | 561 | struct hlist_node *node; |
533 | struct hlist_head *head; | 562 | struct hlist_head *head; |
534 | struct element_t *bucket; | ||
535 | struct orig_node *orig_node; | 563 | struct orig_node *orig_node; |
536 | struct vis_packet *packet; | 564 | struct vis_packet *packet; |
537 | int best_tq = -1, i; | 565 | int best_tq = -1, i; |
@@ -541,16 +569,17 @@ static int find_best_vis_server(struct bat_priv *bat_priv, | |||
541 | for (i = 0; i < hash->size; i++) { | 569 | for (i = 0; i < hash->size; i++) { |
542 | head = &hash->table[i]; | 570 | head = &hash->table[i]; |
543 | 571 | ||
544 | hlist_for_each_entry(bucket, walk, head, hlist) { | 572 | rcu_read_lock(); |
545 | orig_node = bucket->data; | 573 | hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { |
546 | if ((orig_node) && (orig_node->router) && | 574 | if ((orig_node) && (orig_node->router) && |
547 | (orig_node->flags & VIS_SERVER) && | 575 | (orig_node->flags & VIS_SERVER) && |
548 | (orig_node->router->tq_avg > best_tq)) { | 576 | (orig_node->router->tq_avg > best_tq)) { |
549 | best_tq = orig_node->router->tq_avg; | 577 | best_tq = orig_node->router->tq_avg; |
550 | memcpy(packet->target_orig, orig_node->orig, | 578 | memcpy(packet->target_orig, orig_node->orig, |
551 | ETH_ALEN); | 579 | ETH_ALEN); |
552 | } | 580 | } |
553 | } | 581 | } |
582 | rcu_read_unlock(); | ||
554 | } | 583 | } |
555 | 584 | ||
556 | return best_tq; | 585 | return best_tq; |
@@ -573,9 +602,8 @@ static bool vis_packet_full(struct vis_info *info) | |||
573 | static int generate_vis_packet(struct bat_priv *bat_priv) | 602 | static int generate_vis_packet(struct bat_priv *bat_priv) |
574 | { | 603 | { |
575 | struct hashtable_t *hash = bat_priv->orig_hash; | 604 | struct hashtable_t *hash = bat_priv->orig_hash; |
576 | struct hlist_node *walk; | 605 | struct hlist_node *node; |
577 | struct hlist_head *head; | 606 | struct hlist_head *head; |
578 | struct element_t *bucket; | ||
579 | struct orig_node *orig_node; | 607 | struct orig_node *orig_node; |
580 | struct neigh_node *neigh_node; | 608 | struct neigh_node *neigh_node; |
581 | struct vis_info *info = (struct vis_info *)bat_priv->my_vis_info; | 609 | struct vis_info *info = (struct vis_info *)bat_priv->my_vis_info; |
@@ -587,7 +615,6 @@ static int generate_vis_packet(struct bat_priv *bat_priv) | |||
587 | info->first_seen = jiffies; | 615 | info->first_seen = jiffies; |
588 | packet->vis_type = atomic_read(&bat_priv->vis_mode); | 616 | packet->vis_type = atomic_read(&bat_priv->vis_mode); |
589 | 617 | ||
590 | spin_lock_bh(&bat_priv->orig_hash_lock); | ||
591 | memcpy(packet->target_orig, broadcast_addr, ETH_ALEN); | 618 | memcpy(packet->target_orig, broadcast_addr, ETH_ALEN); |
592 | packet->ttl = TTL; | 619 | packet->ttl = TTL; |
593 | packet->seqno = htonl(ntohl(packet->seqno) + 1); | 620 | packet->seqno = htonl(ntohl(packet->seqno) + 1); |
@@ -597,23 +624,21 @@ static int generate_vis_packet(struct bat_priv *bat_priv) | |||
597 | if (packet->vis_type == VIS_TYPE_CLIENT_UPDATE) { | 624 | if (packet->vis_type == VIS_TYPE_CLIENT_UPDATE) { |
598 | best_tq = find_best_vis_server(bat_priv, info); | 625 | best_tq = find_best_vis_server(bat_priv, info); |
599 | 626 | ||
600 | if (best_tq < 0) { | 627 | if (best_tq < 0) |
601 | spin_unlock_bh(&bat_priv->orig_hash_lock); | ||
602 | return -1; | 628 | return -1; |
603 | } | ||
604 | } | 629 | } |
605 | 630 | ||
606 | for (i = 0; i < hash->size; i++) { | 631 | for (i = 0; i < hash->size; i++) { |
607 | head = &hash->table[i]; | 632 | head = &hash->table[i]; |
608 | 633 | ||
609 | hlist_for_each_entry(bucket, walk, head, hlist) { | 634 | rcu_read_lock(); |
610 | orig_node = bucket->data; | 635 | hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { |
611 | neigh_node = orig_node->router; | 636 | neigh_node = orig_node->router; |
612 | 637 | ||
613 | if (!neigh_node) | 638 | if (!neigh_node) |
614 | continue; | 639 | continue; |
615 | 640 | ||
616 | if (!compare_orig(neigh_node->addr, orig_node->orig)) | 641 | if (!compare_eth(neigh_node->addr, orig_node->orig)) |
617 | continue; | 642 | continue; |
618 | 643 | ||
619 | if (neigh_node->if_incoming->if_status != IF_ACTIVE) | 644 | if (neigh_node->if_incoming->if_status != IF_ACTIVE) |
@@ -632,23 +657,19 @@ static int generate_vis_packet(struct bat_priv *bat_priv) | |||
632 | entry->quality = neigh_node->tq_avg; | 657 | entry->quality = neigh_node->tq_avg; |
633 | packet->entries++; | 658 | packet->entries++; |
634 | 659 | ||
635 | if (vis_packet_full(info)) { | 660 | if (vis_packet_full(info)) |
636 | spin_unlock_bh(&bat_priv->orig_hash_lock); | 661 | goto unlock; |
637 | return 0; | ||
638 | } | ||
639 | } | 662 | } |
663 | rcu_read_unlock(); | ||
640 | } | 664 | } |
641 | 665 | ||
642 | spin_unlock_bh(&bat_priv->orig_hash_lock); | ||
643 | |||
644 | hash = bat_priv->hna_local_hash; | 666 | hash = bat_priv->hna_local_hash; |
645 | 667 | ||
646 | spin_lock_bh(&bat_priv->hna_lhash_lock); | 668 | spin_lock_bh(&bat_priv->hna_lhash_lock); |
647 | for (i = 0; i < hash->size; i++) { | 669 | for (i = 0; i < hash->size; i++) { |
648 | head = &hash->table[i]; | 670 | head = &hash->table[i]; |
649 | 671 | ||
650 | hlist_for_each_entry(bucket, walk, head, hlist) { | 672 | hlist_for_each_entry(hna_local_entry, node, head, hash_entry) { |
651 | hna_local_entry = bucket->data; | ||
652 | entry = (struct vis_info_entry *) | 673 | entry = (struct vis_info_entry *) |
653 | skb_put(info->skb_packet, | 674 | skb_put(info->skb_packet, |
654 | sizeof(*entry)); | 675 | sizeof(*entry)); |
@@ -666,6 +687,10 @@ static int generate_vis_packet(struct bat_priv *bat_priv) | |||
666 | 687 | ||
667 | spin_unlock_bh(&bat_priv->hna_lhash_lock); | 688 | spin_unlock_bh(&bat_priv->hna_lhash_lock); |
668 | return 0; | 689 | return 0; |
690 | |||
691 | unlock: | ||
692 | rcu_read_unlock(); | ||
693 | return 0; | ||
669 | } | 694 | } |
670 | 695 | ||
671 | /* free old vis packets. Must be called with this vis_hash_lock | 696 | /* free old vis packets. Must be called with this vis_hash_lock |
@@ -674,25 +699,22 @@ static void purge_vis_packets(struct bat_priv *bat_priv) | |||
674 | { | 699 | { |
675 | int i; | 700 | int i; |
676 | struct hashtable_t *hash = bat_priv->vis_hash; | 701 | struct hashtable_t *hash = bat_priv->vis_hash; |
677 | struct hlist_node *walk, *safe; | 702 | struct hlist_node *node, *node_tmp; |
678 | struct hlist_head *head; | 703 | struct hlist_head *head; |
679 | struct element_t *bucket; | ||
680 | struct vis_info *info; | 704 | struct vis_info *info; |
681 | 705 | ||
682 | for (i = 0; i < hash->size; i++) { | 706 | for (i = 0; i < hash->size; i++) { |
683 | head = &hash->table[i]; | 707 | head = &hash->table[i]; |
684 | 708 | ||
685 | hlist_for_each_entry_safe(bucket, walk, safe, head, hlist) { | 709 | hlist_for_each_entry_safe(info, node, node_tmp, |
686 | info = bucket->data; | 710 | head, hash_entry) { |
687 | |||
688 | /* never purge own data. */ | 711 | /* never purge own data. */ |
689 | if (info == bat_priv->my_vis_info) | 712 | if (info == bat_priv->my_vis_info) |
690 | continue; | 713 | continue; |
691 | 714 | ||
692 | if (time_after(jiffies, | 715 | if (time_after(jiffies, |
693 | info->first_seen + VIS_TIMEOUT * HZ)) { | 716 | info->first_seen + VIS_TIMEOUT * HZ)) { |
694 | hlist_del(walk); | 717 | hlist_del(node); |
695 | kfree(bucket); | ||
696 | send_list_del(info); | 718 | send_list_del(info); |
697 | kref_put(&info->refcount, free_info); | 719 | kref_put(&info->refcount, free_info); |
698 | } | 720 | } |
@@ -704,27 +726,24 @@ static void broadcast_vis_packet(struct bat_priv *bat_priv, | |||
704 | struct vis_info *info) | 726 | struct vis_info *info) |
705 | { | 727 | { |
706 | struct hashtable_t *hash = bat_priv->orig_hash; | 728 | struct hashtable_t *hash = bat_priv->orig_hash; |
707 | struct hlist_node *walk; | 729 | struct hlist_node *node; |
708 | struct hlist_head *head; | 730 | struct hlist_head *head; |
709 | struct element_t *bucket; | ||
710 | struct orig_node *orig_node; | 731 | struct orig_node *orig_node; |
711 | struct vis_packet *packet; | 732 | struct vis_packet *packet; |
712 | struct sk_buff *skb; | 733 | struct sk_buff *skb; |
713 | struct batman_if *batman_if; | 734 | struct hard_iface *hard_iface; |
714 | uint8_t dstaddr[ETH_ALEN]; | 735 | uint8_t dstaddr[ETH_ALEN]; |
715 | int i; | 736 | int i; |
716 | 737 | ||
717 | 738 | ||
718 | spin_lock_bh(&bat_priv->orig_hash_lock); | ||
719 | packet = (struct vis_packet *)info->skb_packet->data; | 739 | packet = (struct vis_packet *)info->skb_packet->data; |
720 | 740 | ||
721 | /* send to all routers in range. */ | 741 | /* send to all routers in range. */ |
722 | for (i = 0; i < hash->size; i++) { | 742 | for (i = 0; i < hash->size; i++) { |
723 | head = &hash->table[i]; | 743 | head = &hash->table[i]; |
724 | 744 | ||
725 | hlist_for_each_entry(bucket, walk, head, hlist) { | 745 | rcu_read_lock(); |
726 | orig_node = bucket->data; | 746 | hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { |
727 | |||
728 | /* if it's a vis server and reachable, send it. */ | 747 | /* if it's a vis server and reachable, send it. */ |
729 | if ((!orig_node) || (!orig_node->router)) | 748 | if ((!orig_node) || (!orig_node->router)) |
730 | continue; | 749 | continue; |
@@ -737,54 +756,61 @@ static void broadcast_vis_packet(struct bat_priv *bat_priv, | |||
737 | continue; | 756 | continue; |
738 | 757 | ||
739 | memcpy(packet->target_orig, orig_node->orig, ETH_ALEN); | 758 | memcpy(packet->target_orig, orig_node->orig, ETH_ALEN); |
740 | batman_if = orig_node->router->if_incoming; | 759 | hard_iface = orig_node->router->if_incoming; |
741 | memcpy(dstaddr, orig_node->router->addr, ETH_ALEN); | 760 | memcpy(dstaddr, orig_node->router->addr, ETH_ALEN); |
742 | spin_unlock_bh(&bat_priv->orig_hash_lock); | ||
743 | 761 | ||
744 | skb = skb_clone(info->skb_packet, GFP_ATOMIC); | 762 | skb = skb_clone(info->skb_packet, GFP_ATOMIC); |
745 | if (skb) | 763 | if (skb) |
746 | send_skb_packet(skb, batman_if, dstaddr); | 764 | send_skb_packet(skb, hard_iface, dstaddr); |
747 | 765 | ||
748 | spin_lock_bh(&bat_priv->orig_hash_lock); | ||
749 | } | 766 | } |
750 | 767 | rcu_read_unlock(); | |
751 | } | 768 | } |
752 | |||
753 | spin_unlock_bh(&bat_priv->orig_hash_lock); | ||
754 | } | 769 | } |
755 | 770 | ||
756 | static void unicast_vis_packet(struct bat_priv *bat_priv, | 771 | static void unicast_vis_packet(struct bat_priv *bat_priv, |
757 | struct vis_info *info) | 772 | struct vis_info *info) |
758 | { | 773 | { |
759 | struct orig_node *orig_node; | 774 | struct orig_node *orig_node; |
775 | struct neigh_node *neigh_node = NULL; | ||
760 | struct sk_buff *skb; | 776 | struct sk_buff *skb; |
761 | struct vis_packet *packet; | 777 | struct vis_packet *packet; |
762 | struct batman_if *batman_if; | ||
763 | uint8_t dstaddr[ETH_ALEN]; | ||
764 | 778 | ||
765 | spin_lock_bh(&bat_priv->orig_hash_lock); | ||
766 | packet = (struct vis_packet *)info->skb_packet->data; | 779 | packet = (struct vis_packet *)info->skb_packet->data; |
767 | orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash, | ||
768 | compare_orig, choose_orig, | ||
769 | packet->target_orig)); | ||
770 | 780 | ||
771 | if ((!orig_node) || (!orig_node->router)) | 781 | rcu_read_lock(); |
772 | goto out; | 782 | orig_node = orig_hash_find(bat_priv, packet->target_orig); |
773 | 783 | ||
774 | /* don't lock while sending the packets ... we therefore | 784 | if (!orig_node) |
775 | * copy the required data before sending */ | 785 | goto unlock; |
776 | batman_if = orig_node->router->if_incoming; | 786 | |
777 | memcpy(dstaddr, orig_node->router->addr, ETH_ALEN); | 787 | neigh_node = orig_node->router; |
778 | spin_unlock_bh(&bat_priv->orig_hash_lock); | 788 | |
789 | if (!neigh_node) | ||
790 | goto unlock; | ||
791 | |||
792 | if (!atomic_inc_not_zero(&neigh_node->refcount)) { | ||
793 | neigh_node = NULL; | ||
794 | goto unlock; | ||
795 | } | ||
796 | |||
797 | rcu_read_unlock(); | ||
779 | 798 | ||
780 | skb = skb_clone(info->skb_packet, GFP_ATOMIC); | 799 | skb = skb_clone(info->skb_packet, GFP_ATOMIC); |
781 | if (skb) | 800 | if (skb) |
782 | send_skb_packet(skb, batman_if, dstaddr); | 801 | send_skb_packet(skb, neigh_node->if_incoming, |
802 | neigh_node->addr); | ||
783 | 803 | ||
784 | return; | 804 | goto out; |
785 | 805 | ||
806 | unlock: | ||
807 | rcu_read_unlock(); | ||
786 | out: | 808 | out: |
787 | spin_unlock_bh(&bat_priv->orig_hash_lock); | 809 | if (neigh_node) |
810 | neigh_node_free_ref(neigh_node); | ||
811 | if (orig_node) | ||
812 | orig_node_free_ref(orig_node); | ||
813 | return; | ||
788 | } | 814 | } |
789 | 815 | ||
790 | /* only send one vis packet. called from send_vis_packets() */ | 816 | /* only send one vis packet. called from send_vis_packets() */ |
@@ -896,7 +922,8 @@ int vis_init(struct bat_priv *bat_priv) | |||
896 | INIT_LIST_HEAD(&bat_priv->vis_send_list); | 922 | INIT_LIST_HEAD(&bat_priv->vis_send_list); |
897 | 923 | ||
898 | hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose, | 924 | hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose, |
899 | bat_priv->my_vis_info); | 925 | bat_priv->my_vis_info, |
926 | &bat_priv->my_vis_info->hash_entry); | ||
900 | if (hash_added < 0) { | 927 | if (hash_added < 0) { |
901 | pr_err("Can't add own vis packet into hash\n"); | 928 | pr_err("Can't add own vis packet into hash\n"); |
902 | /* not in hash, need to remove it manually. */ | 929 | /* not in hash, need to remove it manually. */ |
@@ -918,10 +945,11 @@ err: | |||
918 | } | 945 | } |
919 | 946 | ||
920 | /* Decrease the reference count on a hash item info */ | 947 | /* Decrease the reference count on a hash item info */ |
921 | static void free_info_ref(void *data, void *arg) | 948 | static void free_info_ref(struct hlist_node *node, void *arg) |
922 | { | 949 | { |
923 | struct vis_info *info = data; | 950 | struct vis_info *info; |
924 | 951 | ||
952 | info = container_of(node, struct vis_info, hash_entry); | ||
925 | send_list_del(info); | 953 | send_list_del(info); |
926 | kref_put(&info->refcount, free_info); | 954 | kref_put(&info->refcount, free_info); |
927 | } | 955 | } |
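
free_info_ref() above, like _hna_local_del() and hna_global_del() earlier, now takes the embedded hlist_node rather than a void data pointer and recovers its object with container_of(). The hash_delete() walker that drives these callbacks is changed in hash.h, outside this section; a minimal sketch of such a walker under the hashtable_t layout used throughout the diff, with per-bucket locking omitted for brevity:

/* Sketch of a hash_delete()-style walker matching the new callback
 * signature; the real version lives in net/batman-adv/hash.h. */
typedef void (*hashdata_free_cb)(struct hlist_node *, void *);

static void hash_delete_sketch(struct hashtable_t *hash,
			       hashdata_free_cb free_cb, void *arg)
{
	struct hlist_head *head;
	struct hlist_node *node, *node_tmp;
	int i;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_safe(node, node_tmp, head) {
			hlist_del_rcu(node);

			/* callback recovers the object via container_of() */
			if (free_cb)
				free_cb(node, arg);
		}
	}
}
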