diff options
author | Marek Lindner <lindner_marek@yahoo.de> | 2011-02-10 09:33:53 -0500 |
---|---|---|
committer | Marek Lindner <lindner_marek@yahoo.de> | 2011-03-05 06:50:03 -0500 |
commit | 44524fcdf6ca19b58c24f7622c4af1d8d8fe59f8 (patch) | |
tree | 297c76f80d68d56e3c65a23c70de645a1c93df47 /net/batman-adv/routing.c | |
parent | a4c135c561106c397bae33455acfca4aa8065a30 (diff) |
batman-adv: Correct rcu refcounting for neigh_node
It might be possible that 2 threads access the same data in the same
rcu grace period. The first thread calls call_rcu() to decrement the
refcount and free the data while the second thread increases the
refcount to use the data. To avoid this race condition, all refcount
operations have to be atomic.
Reported-by: Sven Eckelmann <sven@narfation.org>
Signed-off-by: Marek Lindner <lindner_marek@yahoo.de>
Diffstat (limited to 'net/batman-adv/routing.c')
-rw-r--r-- | net/batman-adv/routing.c | 338 |
1 file changed, 222 insertions, 116 deletions
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index 1ad14da20839..9185666ab3e0 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c | |||
@@ -121,12 +121,12 @@ static void update_route(struct bat_priv *bat_priv, | |||
121 | orig_node->router->addr); | 121 | orig_node->router->addr); |
122 | } | 122 | } |
123 | 123 | ||
124 | if (neigh_node) | 124 | if (neigh_node && !atomic_inc_not_zero(&neigh_node->refcount)) |
125 | kref_get(&neigh_node->refcount); | 125 | neigh_node = NULL; |
126 | neigh_node_tmp = orig_node->router; | 126 | neigh_node_tmp = orig_node->router; |
127 | orig_node->router = neigh_node; | 127 | orig_node->router = neigh_node; |
128 | if (neigh_node_tmp) | 128 | if (neigh_node_tmp) |
129 | kref_put(&neigh_node_tmp->refcount, neigh_node_free_ref); | 129 | neigh_node_free_ref(neigh_node_tmp); |
130 | } | 130 | } |
131 | 131 | ||
132 | 132 | ||
@@ -177,7 +177,11 @@ static int is_bidirectional_neigh(struct orig_node *orig_node, | |||
177 | if (!neigh_node) | 177 | if (!neigh_node) |
178 | goto unlock; | 178 | goto unlock; |
179 | 179 | ||
180 | kref_get(&neigh_node->refcount); | 180 | if (!atomic_inc_not_zero(&neigh_node->refcount)) { |
181 | neigh_node = NULL; | ||
182 | goto unlock; | ||
183 | } | ||
184 | |||
181 | rcu_read_unlock(); | 185 | rcu_read_unlock(); |
182 | 186 | ||
183 | neigh_node->last_valid = jiffies; | 187 | neigh_node->last_valid = jiffies; |
@@ -202,7 +206,11 @@ static int is_bidirectional_neigh(struct orig_node *orig_node, | |||
202 | if (!neigh_node) | 206 | if (!neigh_node) |
203 | goto unlock; | 207 | goto unlock; |
204 | 208 | ||
205 | kref_get(&neigh_node->refcount); | 209 | if (!atomic_inc_not_zero(&neigh_node->refcount)) { |
210 | neigh_node = NULL; | ||
211 | goto unlock; | ||
212 | } | ||
213 | |||
206 | rcu_read_unlock(); | 214 | rcu_read_unlock(); |
207 | } | 215 | } |
208 | 216 | ||
@@ -267,7 +275,7 @@ unlock: | |||
267 | rcu_read_unlock(); | 275 | rcu_read_unlock(); |
268 | out: | 276 | out: |
269 | if (neigh_node) | 277 | if (neigh_node) |
270 | kref_put(&neigh_node->refcount, neigh_node_free_ref); | 278 | neigh_node_free_ref(neigh_node); |
271 | return ret; | 279 | return ret; |
272 | } | 280 | } |
273 | 281 | ||
@@ -280,8 +288,8 @@ void bonding_candidate_del(struct orig_node *orig_node, | |||
280 | goto out; | 288 | goto out; |
281 | 289 | ||
282 | list_del_rcu(&neigh_node->bonding_list); | 290 | list_del_rcu(&neigh_node->bonding_list); |
283 | call_rcu(&neigh_node->rcu_bond, neigh_node_free_rcu_bond); | ||
284 | INIT_LIST_HEAD(&neigh_node->bonding_list); | 291 | INIT_LIST_HEAD(&neigh_node->bonding_list); |
292 | neigh_node_free_ref(neigh_node); | ||
285 | atomic_dec(&orig_node->bond_candidates); | 293 | atomic_dec(&orig_node->bond_candidates); |
286 | 294 | ||
287 | out: | 295 | out: |
@@ -342,8 +350,10 @@ static void bonding_candidate_add(struct orig_node *orig_node, | |||
342 | if (!list_empty(&neigh_node->bonding_list)) | 350 | if (!list_empty(&neigh_node->bonding_list)) |
343 | goto out; | 351 | goto out; |
344 | 352 | ||
353 | if (!atomic_inc_not_zero(&neigh_node->refcount)) | ||
354 | goto out; | ||
355 | |||
345 | list_add_rcu(&neigh_node->bonding_list, &orig_node->bond_list); | 356 | list_add_rcu(&neigh_node->bonding_list, &orig_node->bond_list); |
346 | kref_get(&neigh_node->refcount); | ||
347 | atomic_inc(&orig_node->bond_candidates); | 357 | atomic_inc(&orig_node->bond_candidates); |
348 | goto out; | 358 | goto out; |
349 | 359 | ||
@@ -387,7 +397,10 @@ static void update_orig(struct bat_priv *bat_priv, | |||
387 | hlist_for_each_entry_rcu(tmp_neigh_node, node, | 397 | hlist_for_each_entry_rcu(tmp_neigh_node, node, |
388 | &orig_node->neigh_list, list) { | 398 | &orig_node->neigh_list, list) { |
389 | if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) && | 399 | if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) && |
390 | (tmp_neigh_node->if_incoming == if_incoming)) { | 400 | (tmp_neigh_node->if_incoming == if_incoming) && |
401 | atomic_inc_not_zero(&tmp_neigh_node->refcount)) { | ||
402 | if (neigh_node) | ||
403 | neigh_node_free_ref(neigh_node); | ||
391 | neigh_node = tmp_neigh_node; | 404 | neigh_node = tmp_neigh_node; |
392 | continue; | 405 | continue; |
393 | } | 406 | } |
@@ -414,11 +427,15 @@ static void update_orig(struct bat_priv *bat_priv, | |||
414 | kref_put(&orig_tmp->refcount, orig_node_free_ref); | 427 | kref_put(&orig_tmp->refcount, orig_node_free_ref); |
415 | if (!neigh_node) | 428 | if (!neigh_node) |
416 | goto unlock; | 429 | goto unlock; |
430 | |||
431 | if (!atomic_inc_not_zero(&neigh_node->refcount)) { | ||
432 | neigh_node = NULL; | ||
433 | goto unlock; | ||
434 | } | ||
417 | } else | 435 | } else |
418 | bat_dbg(DBG_BATMAN, bat_priv, | 436 | bat_dbg(DBG_BATMAN, bat_priv, |
419 | "Updating existing last-hop neighbor of originator\n"); | 437 | "Updating existing last-hop neighbor of originator\n"); |
420 | 438 | ||
421 | kref_get(&neigh_node->refcount); | ||
422 | rcu_read_unlock(); | 439 | rcu_read_unlock(); |
423 | 440 | ||
424 | orig_node->flags = batman_packet->flags; | 441 | orig_node->flags = batman_packet->flags; |
@@ -495,7 +512,7 @@ unlock: | |||
495 | rcu_read_unlock(); | 512 | rcu_read_unlock(); |
496 | out: | 513 | out: |
497 | if (neigh_node) | 514 | if (neigh_node) |
498 | kref_put(&neigh_node->refcount, neigh_node_free_ref); | 515 | neigh_node_free_ref(neigh_node); |
499 | } | 516 | } |
500 | 517 | ||
501 | /* checks whether the host restarted and is in the protection time. | 518 | /* checks whether the host restarted and is in the protection time. |
@@ -870,22 +887,23 @@ int recv_bat_packet(struct sk_buff *skb, struct batman_if *batman_if) | |||
870 | static int recv_my_icmp_packet(struct bat_priv *bat_priv, | 887 | static int recv_my_icmp_packet(struct bat_priv *bat_priv, |
871 | struct sk_buff *skb, size_t icmp_len) | 888 | struct sk_buff *skb, size_t icmp_len) |
872 | { | 889 | { |
873 | struct orig_node *orig_node; | 890 | struct orig_node *orig_node = NULL; |
891 | struct neigh_node *neigh_node = NULL; | ||
874 | struct icmp_packet_rr *icmp_packet; | 892 | struct icmp_packet_rr *icmp_packet; |
875 | struct batman_if *batman_if; | 893 | struct batman_if *batman_if; |
876 | int ret; | ||
877 | uint8_t dstaddr[ETH_ALEN]; | 894 | uint8_t dstaddr[ETH_ALEN]; |
895 | int ret = NET_RX_DROP; | ||
878 | 896 | ||
879 | icmp_packet = (struct icmp_packet_rr *)skb->data; | 897 | icmp_packet = (struct icmp_packet_rr *)skb->data; |
880 | 898 | ||
881 | /* add data to device queue */ | 899 | /* add data to device queue */ |
882 | if (icmp_packet->msg_type != ECHO_REQUEST) { | 900 | if (icmp_packet->msg_type != ECHO_REQUEST) { |
883 | bat_socket_receive_packet(icmp_packet, icmp_len); | 901 | bat_socket_receive_packet(icmp_packet, icmp_len); |
884 | return NET_RX_DROP; | 902 | goto out; |
885 | } | 903 | } |
886 | 904 | ||
887 | if (!bat_priv->primary_if) | 905 | if (!bat_priv->primary_if) |
888 | return NET_RX_DROP; | 906 | goto out; |
889 | 907 | ||
890 | /* answer echo request (ping) */ | 908 | /* answer echo request (ping) */ |
891 | /* get routing information */ | 909 | /* get routing information */ |
@@ -894,46 +912,65 @@ static int recv_my_icmp_packet(struct bat_priv *bat_priv, | |||
894 | orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash, | 912 | orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash, |
895 | compare_orig, choose_orig, | 913 | compare_orig, choose_orig, |
896 | icmp_packet->orig)); | 914 | icmp_packet->orig)); |
897 | rcu_read_unlock(); | ||
898 | ret = NET_RX_DROP; | ||
899 | 915 | ||
900 | if ((orig_node) && (orig_node->router)) { | 916 | if (!orig_node) |
917 | goto unlock; | ||
901 | 918 | ||
902 | /* don't lock while sending the packets ... we therefore | 919 | kref_get(&orig_node->refcount); |
903 | * copy the required data before sending */ | 920 | neigh_node = orig_node->router; |
904 | batman_if = orig_node->router->if_incoming; | ||
905 | memcpy(dstaddr, orig_node->router->addr, ETH_ALEN); | ||
906 | spin_unlock_bh(&bat_priv->orig_hash_lock); | ||
907 | 921 | ||
908 | /* create a copy of the skb, if needed, to modify it. */ | 922 | if (!neigh_node) |
909 | if (skb_cow(skb, sizeof(struct ethhdr)) < 0) | 923 | goto unlock; |
910 | return NET_RX_DROP; | ||
911 | 924 | ||
912 | icmp_packet = (struct icmp_packet_rr *)skb->data; | 925 | if (!atomic_inc_not_zero(&neigh_node->refcount)) { |
926 | neigh_node = NULL; | ||
927 | goto unlock; | ||
928 | } | ||
913 | 929 | ||
914 | memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); | 930 | rcu_read_unlock(); |
915 | memcpy(icmp_packet->orig, | ||
916 | bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN); | ||
917 | icmp_packet->msg_type = ECHO_REPLY; | ||
918 | icmp_packet->ttl = TTL; | ||
919 | 931 | ||
920 | send_skb_packet(skb, batman_if, dstaddr); | 932 | /* don't lock while sending the packets ... we therefore |
921 | ret = NET_RX_SUCCESS; | 933 | * copy the required data before sending */ |
934 | batman_if = orig_node->router->if_incoming; | ||
935 | memcpy(dstaddr, orig_node->router->addr, ETH_ALEN); | ||
936 | spin_unlock_bh(&bat_priv->orig_hash_lock); | ||
922 | 937 | ||
923 | } else | 938 | /* create a copy of the skb, if needed, to modify it. */ |
924 | spin_unlock_bh(&bat_priv->orig_hash_lock); | 939 | if (skb_cow(skb, sizeof(struct ethhdr)) < 0) |
940 | goto out; | ||
925 | 941 | ||
942 | icmp_packet = (struct icmp_packet_rr *)skb->data; | ||
943 | |||
944 | memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); | ||
945 | memcpy(icmp_packet->orig, | ||
946 | bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN); | ||
947 | icmp_packet->msg_type = ECHO_REPLY; | ||
948 | icmp_packet->ttl = TTL; | ||
949 | |||
950 | send_skb_packet(skb, batman_if, dstaddr); | ||
951 | ret = NET_RX_SUCCESS; | ||
952 | goto out; | ||
953 | |||
954 | unlock: | ||
955 | rcu_read_unlock(); | ||
956 | spin_unlock_bh(&bat_priv->orig_hash_lock); | ||
957 | out: | ||
958 | if (neigh_node) | ||
959 | neigh_node_free_ref(neigh_node); | ||
960 | if (orig_node) | ||
961 | kref_put(&orig_node->refcount, orig_node_free_ref); | ||
926 | return ret; | 962 | return ret; |
927 | } | 963 | } |
928 | 964 | ||
929 | static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv, | 965 | static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv, |
930 | struct sk_buff *skb) | 966 | struct sk_buff *skb) |
931 | { | 967 | { |
932 | struct orig_node *orig_node; | 968 | struct orig_node *orig_node = NULL; |
969 | struct neigh_node *neigh_node = NULL; | ||
933 | struct icmp_packet *icmp_packet; | 970 | struct icmp_packet *icmp_packet; |
934 | struct batman_if *batman_if; | 971 | struct batman_if *batman_if; |
935 | int ret; | ||
936 | uint8_t dstaddr[ETH_ALEN]; | 972 | uint8_t dstaddr[ETH_ALEN]; |
973 | int ret = NET_RX_DROP; | ||
937 | 974 | ||
938 | icmp_packet = (struct icmp_packet *)skb->data; | 975 | icmp_packet = (struct icmp_packet *)skb->data; |
939 | 976 | ||
@@ -942,11 +979,11 @@ static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv, | |||
942 | pr_debug("Warning - can't forward icmp packet from %pM to " | 979 | pr_debug("Warning - can't forward icmp packet from %pM to " |
943 | "%pM: ttl exceeded\n", icmp_packet->orig, | 980 | "%pM: ttl exceeded\n", icmp_packet->orig, |
944 | icmp_packet->dst); | 981 | icmp_packet->dst); |
945 | return NET_RX_DROP; | 982 | goto out; |
946 | } | 983 | } |
947 | 984 | ||
948 | if (!bat_priv->primary_if) | 985 | if (!bat_priv->primary_if) |
949 | return NET_RX_DROP; | 986 | goto out; |
950 | 987 | ||
951 | /* get routing information */ | 988 | /* get routing information */ |
952 | spin_lock_bh(&bat_priv->orig_hash_lock); | 989 | spin_lock_bh(&bat_priv->orig_hash_lock); |
@@ -954,35 +991,53 @@ static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv, | |||
954 | orig_node = ((struct orig_node *) | 991 | orig_node = ((struct orig_node *) |
955 | hash_find(bat_priv->orig_hash, compare_orig, choose_orig, | 992 | hash_find(bat_priv->orig_hash, compare_orig, choose_orig, |
956 | icmp_packet->orig)); | 993 | icmp_packet->orig)); |
957 | rcu_read_unlock(); | ||
958 | ret = NET_RX_DROP; | ||
959 | 994 | ||
960 | if ((orig_node) && (orig_node->router)) { | 995 | if (!orig_node) |
996 | goto unlock; | ||
961 | 997 | ||
962 | /* don't lock while sending the packets ... we therefore | 998 | kref_get(&orig_node->refcount); |
963 | * copy the required data before sending */ | 999 | neigh_node = orig_node->router; |
964 | batman_if = orig_node->router->if_incoming; | ||
965 | memcpy(dstaddr, orig_node->router->addr, ETH_ALEN); | ||
966 | spin_unlock_bh(&bat_priv->orig_hash_lock); | ||
967 | 1000 | ||
968 | /* create a copy of the skb, if needed, to modify it. */ | 1001 | if (!neigh_node) |
969 | if (skb_cow(skb, sizeof(struct ethhdr)) < 0) | 1002 | goto unlock; |
970 | return NET_RX_DROP; | 1003 | |
1004 | if (!atomic_inc_not_zero(&neigh_node->refcount)) { | ||
1005 | neigh_node = NULL; | ||
1006 | goto unlock; | ||
1007 | } | ||
971 | 1008 | ||
972 | icmp_packet = (struct icmp_packet *) skb->data; | 1009 | rcu_read_unlock(); |
973 | 1010 | ||
974 | memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); | 1011 | /* don't lock while sending the packets ... we therefore |
975 | memcpy(icmp_packet->orig, | 1012 | * copy the required data before sending */ |
976 | bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN); | 1013 | batman_if = orig_node->router->if_incoming; |
977 | icmp_packet->msg_type = TTL_EXCEEDED; | 1014 | memcpy(dstaddr, orig_node->router->addr, ETH_ALEN); |
978 | icmp_packet->ttl = TTL; | 1015 | spin_unlock_bh(&bat_priv->orig_hash_lock); |
979 | 1016 | ||
980 | send_skb_packet(skb, batman_if, dstaddr); | 1017 | /* create a copy of the skb, if needed, to modify it. */ |
981 | ret = NET_RX_SUCCESS; | 1018 | if (skb_cow(skb, sizeof(struct ethhdr)) < 0) |
1019 | goto out; | ||
982 | 1020 | ||
983 | } else | 1021 | icmp_packet = (struct icmp_packet *)skb->data; |
984 | spin_unlock_bh(&bat_priv->orig_hash_lock); | 1022 | |
1023 | memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); | ||
1024 | memcpy(icmp_packet->orig, | ||
1025 | bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN); | ||
1026 | icmp_packet->msg_type = TTL_EXCEEDED; | ||
1027 | icmp_packet->ttl = TTL; | ||
1028 | |||
1029 | send_skb_packet(skb, batman_if, dstaddr); | ||
1030 | ret = NET_RX_SUCCESS; | ||
1031 | goto out; | ||
985 | 1032 | ||
1033 | unlock: | ||
1034 | rcu_read_unlock(); | ||
1035 | spin_unlock_bh(&bat_priv->orig_hash_lock); | ||
1036 | out: | ||
1037 | if (neigh_node) | ||
1038 | neigh_node_free_ref(neigh_node); | ||
1039 | if (orig_node) | ||
1040 | kref_put(&orig_node->refcount, orig_node_free_ref); | ||
986 | return ret; | 1041 | return ret; |
987 | } | 1042 | } |
988 | 1043 | ||
@@ -992,11 +1047,12 @@ int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if) | |||
992 | struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); | 1047 | struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); |
993 | struct icmp_packet_rr *icmp_packet; | 1048 | struct icmp_packet_rr *icmp_packet; |
994 | struct ethhdr *ethhdr; | 1049 | struct ethhdr *ethhdr; |
995 | struct orig_node *orig_node; | 1050 | struct orig_node *orig_node = NULL; |
1051 | struct neigh_node *neigh_node = NULL; | ||
996 | struct batman_if *batman_if; | 1052 | struct batman_if *batman_if; |
997 | int hdr_size = sizeof(struct icmp_packet); | 1053 | int hdr_size = sizeof(struct icmp_packet); |
998 | int ret; | ||
999 | uint8_t dstaddr[ETH_ALEN]; | 1054 | uint8_t dstaddr[ETH_ALEN]; |
1055 | int ret = NET_RX_DROP; | ||
1000 | 1056 | ||
1001 | /** | 1057 | /** |
1002 | * we truncate all incoming icmp packets if they don't match our size | 1058 | * we truncate all incoming icmp packets if they don't match our size |
@@ -1006,21 +1062,21 @@ int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if) | |||
1006 | 1062 | ||
1007 | /* drop packet if it has not necessary minimum size */ | 1063 | /* drop packet if it has not necessary minimum size */ |
1008 | if (unlikely(!pskb_may_pull(skb, hdr_size))) | 1064 | if (unlikely(!pskb_may_pull(skb, hdr_size))) |
1009 | return NET_RX_DROP; | 1065 | goto out; |
1010 | 1066 | ||
1011 | ethhdr = (struct ethhdr *)skb_mac_header(skb); | 1067 | ethhdr = (struct ethhdr *)skb_mac_header(skb); |
1012 | 1068 | ||
1013 | /* packet with unicast indication but broadcast recipient */ | 1069 | /* packet with unicast indication but broadcast recipient */ |
1014 | if (is_broadcast_ether_addr(ethhdr->h_dest)) | 1070 | if (is_broadcast_ether_addr(ethhdr->h_dest)) |
1015 | return NET_RX_DROP; | 1071 | goto out; |
1016 | 1072 | ||
1017 | /* packet with broadcast sender address */ | 1073 | /* packet with broadcast sender address */ |
1018 | if (is_broadcast_ether_addr(ethhdr->h_source)) | 1074 | if (is_broadcast_ether_addr(ethhdr->h_source)) |
1019 | return NET_RX_DROP; | 1075 | goto out; |
1020 | 1076 | ||
1021 | /* not for me */ | 1077 | /* not for me */ |
1022 | if (!is_my_mac(ethhdr->h_dest)) | 1078 | if (!is_my_mac(ethhdr->h_dest)) |
1023 | return NET_RX_DROP; | 1079 | goto out; |
1024 | 1080 | ||
1025 | icmp_packet = (struct icmp_packet_rr *)skb->data; | 1081 | icmp_packet = (struct icmp_packet_rr *)skb->data; |
1026 | 1082 | ||
@@ -1040,40 +1096,56 @@ int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if) | |||
1040 | if (icmp_packet->ttl < 2) | 1096 | if (icmp_packet->ttl < 2) |
1041 | return recv_icmp_ttl_exceeded(bat_priv, skb); | 1097 | return recv_icmp_ttl_exceeded(bat_priv, skb); |
1042 | 1098 | ||
1043 | ret = NET_RX_DROP; | ||
1044 | |||
1045 | /* get routing information */ | 1099 | /* get routing information */ |
1046 | spin_lock_bh(&bat_priv->orig_hash_lock); | 1100 | spin_lock_bh(&bat_priv->orig_hash_lock); |
1047 | rcu_read_lock(); | 1101 | rcu_read_lock(); |
1048 | orig_node = ((struct orig_node *) | 1102 | orig_node = ((struct orig_node *) |
1049 | hash_find(bat_priv->orig_hash, compare_orig, choose_orig, | 1103 | hash_find(bat_priv->orig_hash, compare_orig, choose_orig, |
1050 | icmp_packet->dst)); | 1104 | icmp_packet->dst)); |
1051 | rcu_read_unlock(); | 1105 | if (!orig_node) |
1106 | goto unlock; | ||
1052 | 1107 | ||
1053 | if ((orig_node) && (orig_node->router)) { | 1108 | kref_get(&orig_node->refcount); |
1109 | neigh_node = orig_node->router; | ||
1054 | 1110 | ||
1055 | /* don't lock while sending the packets ... we therefore | 1111 | if (!neigh_node) |
1056 | * copy the required data before sending */ | 1112 | goto unlock; |
1057 | batman_if = orig_node->router->if_incoming; | ||
1058 | memcpy(dstaddr, orig_node->router->addr, ETH_ALEN); | ||
1059 | spin_unlock_bh(&bat_priv->orig_hash_lock); | ||
1060 | 1113 | ||
1061 | /* create a copy of the skb, if needed, to modify it. */ | 1114 | if (!atomic_inc_not_zero(&neigh_node->refcount)) { |
1062 | if (skb_cow(skb, sizeof(struct ethhdr)) < 0) | 1115 | neigh_node = NULL; |
1063 | return NET_RX_DROP; | 1116 | goto unlock; |
1117 | } | ||
1118 | |||
1119 | rcu_read_unlock(); | ||
1064 | 1120 | ||
1065 | icmp_packet = (struct icmp_packet_rr *)skb->data; | 1121 | /* don't lock while sending the packets ... we therefore |
1122 | * copy the required data before sending */ | ||
1123 | batman_if = orig_node->router->if_incoming; | ||
1124 | memcpy(dstaddr, orig_node->router->addr, ETH_ALEN); | ||
1125 | spin_unlock_bh(&bat_priv->orig_hash_lock); | ||
1066 | 1126 | ||
1067 | /* decrement ttl */ | 1127 | /* create a copy of the skb, if needed, to modify it. */ |
1068 | icmp_packet->ttl--; | 1128 | if (skb_cow(skb, sizeof(struct ethhdr)) < 0) |
1129 | goto out; | ||
1069 | 1130 | ||
1070 | /* route it */ | 1131 | icmp_packet = (struct icmp_packet_rr *)skb->data; |
1071 | send_skb_packet(skb, batman_if, dstaddr); | ||
1072 | ret = NET_RX_SUCCESS; | ||
1073 | 1132 | ||
1074 | } else | 1133 | /* decrement ttl */ |
1075 | spin_unlock_bh(&bat_priv->orig_hash_lock); | 1134 | icmp_packet->ttl--; |
1076 | 1135 | ||
1136 | /* route it */ | ||
1137 | send_skb_packet(skb, batman_if, dstaddr); | ||
1138 | ret = NET_RX_SUCCESS; | ||
1139 | goto out; | ||
1140 | |||
1141 | unlock: | ||
1142 | rcu_read_unlock(); | ||
1143 | spin_unlock_bh(&bat_priv->orig_hash_lock); | ||
1144 | out: | ||
1145 | if (neigh_node) | ||
1146 | neigh_node_free_ref(neigh_node); | ||
1147 | if (orig_node) | ||
1148 | kref_put(&orig_node->refcount, orig_node_free_ref); | ||
1077 | return ret; | 1149 | return ret; |
1078 | } | 1150 | } |
1079 | 1151 | ||
@@ -1104,12 +1176,11 @@ struct neigh_node *find_router(struct bat_priv *bat_priv, | |||
1104 | /* select default router to output */ | 1176 | /* select default router to output */ |
1105 | router = orig_node->router; | 1177 | router = orig_node->router; |
1106 | router_orig = orig_node->router->orig_node; | 1178 | router_orig = orig_node->router->orig_node; |
1107 | if (!router_orig) { | 1179 | if (!router_orig || !atomic_inc_not_zero(&router->refcount)) { |
1108 | rcu_read_unlock(); | 1180 | rcu_read_unlock(); |
1109 | return NULL; | 1181 | return NULL; |
1110 | } | 1182 | } |
1111 | 1183 | ||
1112 | |||
1113 | if ((!recv_if) && (!bonding_enabled)) | 1184 | if ((!recv_if) && (!bonding_enabled)) |
1114 | goto return_router; | 1185 | goto return_router; |
1115 | 1186 | ||
@@ -1142,6 +1213,7 @@ struct neigh_node *find_router(struct bat_priv *bat_priv, | |||
1142 | * is is not on the interface where the packet came | 1213 | * is is not on the interface where the packet came |
1143 | * in. */ | 1214 | * in. */ |
1144 | 1215 | ||
1216 | neigh_node_free_ref(router); | ||
1145 | first_candidate = NULL; | 1217 | first_candidate = NULL; |
1146 | router = NULL; | 1218 | router = NULL; |
1147 | 1219 | ||
@@ -1154,16 +1226,23 @@ struct neigh_node *find_router(struct bat_priv *bat_priv, | |||
1154 | if (!first_candidate) | 1226 | if (!first_candidate) |
1155 | first_candidate = tmp_neigh_node; | 1227 | first_candidate = tmp_neigh_node; |
1156 | /* recv_if == NULL on the first node. */ | 1228 | /* recv_if == NULL on the first node. */ |
1157 | if (tmp_neigh_node->if_incoming != recv_if) { | 1229 | if (tmp_neigh_node->if_incoming != recv_if && |
1230 | atomic_inc_not_zero(&tmp_neigh_node->refcount)) { | ||
1158 | router = tmp_neigh_node; | 1231 | router = tmp_neigh_node; |
1159 | break; | 1232 | break; |
1160 | } | 1233 | } |
1161 | } | 1234 | } |
1162 | 1235 | ||
1163 | /* use the first candidate if nothing was found. */ | 1236 | /* use the first candidate if nothing was found. */ |
1164 | if (!router) | 1237 | if (!router && first_candidate && |
1238 | atomic_inc_not_zero(&first_candidate->refcount)) | ||
1165 | router = first_candidate; | 1239 | router = first_candidate; |
1166 | 1240 | ||
1241 | if (!router) { | ||
1242 | rcu_read_unlock(); | ||
1243 | return NULL; | ||
1244 | } | ||
1245 | |||
1167 | /* selected should point to the next element | 1246 | /* selected should point to the next element |
1168 | * after the current router */ | 1247 | * after the current router */ |
1169 | spin_lock_bh(&primary_orig_node->neigh_list_lock); | 1248 | spin_lock_bh(&primary_orig_node->neigh_list_lock); |
@@ -1184,21 +1263,34 @@ struct neigh_node *find_router(struct bat_priv *bat_priv, | |||
1184 | first_candidate = tmp_neigh_node; | 1263 | first_candidate = tmp_neigh_node; |
1185 | 1264 | ||
1186 | /* recv_if == NULL on the first node. */ | 1265 | /* recv_if == NULL on the first node. */ |
1187 | if (tmp_neigh_node->if_incoming != recv_if) | 1266 | if (tmp_neigh_node->if_incoming == recv_if) |
1188 | /* if we don't have a router yet | 1267 | continue; |
1189 | * or this one is better, choose it. */ | 1268 | |
1190 | if ((!router) || | 1269 | if (!atomic_inc_not_zero(&tmp_neigh_node->refcount)) |
1191 | (tmp_neigh_node->tq_avg > router->tq_avg)) { | 1270 | continue; |
1192 | router = tmp_neigh_node; | 1271 | |
1193 | } | 1272 | /* if we don't have a router yet |
1273 | * or this one is better, choose it. */ | ||
1274 | if ((!router) || | ||
1275 | (tmp_neigh_node->tq_avg > router->tq_avg)) { | ||
1276 | /* decrement refcount of | ||
1277 | * previously selected router */ | ||
1278 | if (router) | ||
1279 | neigh_node_free_ref(router); | ||
1280 | |||
1281 | router = tmp_neigh_node; | ||
1282 | atomic_inc_not_zero(&router->refcount); | ||
1283 | } | ||
1284 | |||
1285 | neigh_node_free_ref(tmp_neigh_node); | ||
1194 | } | 1286 | } |
1195 | 1287 | ||
1196 | /* use the first candidate if nothing was found. */ | 1288 | /* use the first candidate if nothing was found. */ |
1197 | if (!router) | 1289 | if (!router && first_candidate && |
1290 | atomic_inc_not_zero(&first_candidate->refcount)) | ||
1198 | router = first_candidate; | 1291 | router = first_candidate; |
1199 | } | 1292 | } |
1200 | return_router: | 1293 | return_router: |
1201 | kref_get(&router->refcount); | ||
1202 | rcu_read_unlock(); | 1294 | rcu_read_unlock(); |
1203 | return router; | 1295 | return router; |
1204 | } | 1296 | } |
@@ -1232,13 +1324,13 @@ int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if, | |||
1232 | int hdr_size) | 1324 | int hdr_size) |
1233 | { | 1325 | { |
1234 | struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); | 1326 | struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); |
1235 | struct orig_node *orig_node; | 1327 | struct orig_node *orig_node = NULL; |
1236 | struct neigh_node *router; | 1328 | struct neigh_node *neigh_node = NULL; |
1237 | struct batman_if *batman_if; | 1329 | struct batman_if *batman_if; |
1238 | uint8_t dstaddr[ETH_ALEN]; | 1330 | uint8_t dstaddr[ETH_ALEN]; |
1239 | struct unicast_packet *unicast_packet; | 1331 | struct unicast_packet *unicast_packet; |
1240 | struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb); | 1332 | struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb); |
1241 | int ret; | 1333 | int ret = NET_RX_DROP; |
1242 | struct sk_buff *new_skb; | 1334 | struct sk_buff *new_skb; |
1243 | 1335 | ||
1244 | unicast_packet = (struct unicast_packet *)skb->data; | 1336 | unicast_packet = (struct unicast_packet *)skb->data; |
@@ -1248,7 +1340,7 @@ int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if, | |||
1248 | pr_debug("Warning - can't forward unicast packet from %pM to " | 1340 | pr_debug("Warning - can't forward unicast packet from %pM to " |
1249 | "%pM: ttl exceeded\n", ethhdr->h_source, | 1341 | "%pM: ttl exceeded\n", ethhdr->h_source, |
1250 | unicast_packet->dest); | 1342 | unicast_packet->dest); |
1251 | return NET_RX_DROP; | 1343 | goto out; |
1252 | } | 1344 | } |
1253 | 1345 | ||
1254 | /* get routing information */ | 1346 | /* get routing information */ |
@@ -1257,27 +1349,29 @@ int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if, | |||
1257 | orig_node = ((struct orig_node *) | 1349 | orig_node = ((struct orig_node *) |
1258 | hash_find(bat_priv->orig_hash, compare_orig, choose_orig, | 1350 | hash_find(bat_priv->orig_hash, compare_orig, choose_orig, |
1259 | unicast_packet->dest)); | 1351 | unicast_packet->dest)); |
1352 | if (!orig_node) | ||
1353 | goto unlock; | ||
1354 | |||
1355 | kref_get(&orig_node->refcount); | ||
1260 | rcu_read_unlock(); | 1356 | rcu_read_unlock(); |
1261 | 1357 | ||
1262 | /* find_router() increases neigh_nodes refcount if found. */ | 1358 | /* find_router() increases neigh_nodes refcount if found. */ |
1263 | router = find_router(bat_priv, orig_node, recv_if); | 1359 | neigh_node = find_router(bat_priv, orig_node, recv_if); |
1264 | 1360 | ||
1265 | if (!router) { | 1361 | if (!neigh_node) { |
1266 | spin_unlock_bh(&bat_priv->orig_hash_lock); | 1362 | spin_unlock_bh(&bat_priv->orig_hash_lock); |
1267 | return NET_RX_DROP; | 1363 | goto out; |
1268 | } | 1364 | } |
1269 | 1365 | ||
1270 | /* don't lock while sending the packets ... we therefore | 1366 | /* don't lock while sending the packets ... we therefore |
1271 | * copy the required data before sending */ | 1367 | * copy the required data before sending */ |
1272 | 1368 | batman_if = neigh_node->if_incoming; | |
1273 | batman_if = router->if_incoming; | 1369 | memcpy(dstaddr, neigh_node->addr, ETH_ALEN); |
1274 | memcpy(dstaddr, router->addr, ETH_ALEN); | ||
1275 | |||
1276 | spin_unlock_bh(&bat_priv->orig_hash_lock); | 1370 | spin_unlock_bh(&bat_priv->orig_hash_lock); |
1277 | 1371 | ||
1278 | /* create a copy of the skb, if needed, to modify it. */ | 1372 | /* create a copy of the skb, if needed, to modify it. */ |
1279 | if (skb_cow(skb, sizeof(struct ethhdr)) < 0) | 1373 | if (skb_cow(skb, sizeof(struct ethhdr)) < 0) |
1280 | return NET_RX_DROP; | 1374 | goto out; |
1281 | 1375 | ||
1282 | unicast_packet = (struct unicast_packet *)skb->data; | 1376 | unicast_packet = (struct unicast_packet *)skb->data; |
1283 | 1377 | ||
@@ -1293,11 +1387,13 @@ int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if, | |||
1293 | ret = frag_reassemble_skb(skb, bat_priv, &new_skb); | 1387 | ret = frag_reassemble_skb(skb, bat_priv, &new_skb); |
1294 | 1388 | ||
1295 | if (ret == NET_RX_DROP) | 1389 | if (ret == NET_RX_DROP) |
1296 | return NET_RX_DROP; | 1390 | goto out; |
1297 | 1391 | ||
1298 | /* packet was buffered for late merge */ | 1392 | /* packet was buffered for late merge */ |
1299 | if (!new_skb) | 1393 | if (!new_skb) { |
1300 | return NET_RX_SUCCESS; | 1394 | ret = NET_RX_SUCCESS; |
1395 | goto out; | ||
1396 | } | ||
1301 | 1397 | ||
1302 | skb = new_skb; | 1398 | skb = new_skb; |
1303 | unicast_packet = (struct unicast_packet *)skb->data; | 1399 | unicast_packet = (struct unicast_packet *)skb->data; |
@@ -1308,8 +1404,18 @@ int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if, | |||
1308 | 1404 | ||
1309 | /* route it */ | 1405 | /* route it */ |
1310 | send_skb_packet(skb, batman_if, dstaddr); | 1406 | send_skb_packet(skb, batman_if, dstaddr); |
1407 | ret = NET_RX_SUCCESS; | ||
1408 | goto out; | ||
1311 | 1409 | ||
1312 | return NET_RX_SUCCESS; | 1410 | unlock: |
1411 | rcu_read_unlock(); | ||
1412 | spin_unlock_bh(&bat_priv->orig_hash_lock); | ||
1413 | out: | ||
1414 | if (neigh_node) | ||
1415 | neigh_node_free_ref(neigh_node); | ||
1416 | if (orig_node) | ||
1417 | kref_put(&orig_node->refcount, orig_node_free_ref); | ||
1418 | return ret; | ||
1313 | } | 1419 | } |
1314 | 1420 | ||
1315 | int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if) | 1421 | int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if) |