aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv4/route.c
diff options
context:
space:
mode:
authorSteffen Klassert <steffen.klassert@secunet.com>2013-01-20 20:59:11 -0500
committerDavid S. Miller <davem@davemloft.net>2013-01-21 14:17:05 -0500
commit9cb3a50c5f63ed745702972f66eaee8767659acd (patch)
tree2dc26f0aef8f48c21e91496492eaddae3d133c7f /net/ipv4/route.c
parentbc9540c637c3d8712ccbf9dcf28621f380ed5e64 (diff)
ipv4: Invalidate the socket cached route on pmtu events if possible
The route lookup in ipv4_sk_update_pmtu() might return a route different from the route we cached at the socket. This is because standard routes are per cpu, so each cpu has its own struct rtable. This means that we do not invalidate the socket cached route if the NET_RX_SOFTIRQ is not served by the same cpu that the sending socket uses. As a result, the cached route is reused until we disconnect. With this patch we invalidate the socket cached route if possible. If the socket is owned by the user, we can't update the cached route directly. A followup patch will implement socket release callback functions for datagram sockets to handle this case. Reported-by: Yurij M. Plotnikov <Yurij.Plotnikov@oktetlabs.ru> Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/route.c')
-rw-r--r--net/ipv4/route.c42
1 files changed, 41 insertions, 1 deletions
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 259cbeee9a8b..132737a7c83a 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -965,7 +965,7 @@ void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
965} 965}
966EXPORT_SYMBOL_GPL(ipv4_update_pmtu); 966EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
967 967
968void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu) 968static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
969{ 969{
970 const struct iphdr *iph = (const struct iphdr *) skb->data; 970 const struct iphdr *iph = (const struct iphdr *) skb->data;
971 struct flowi4 fl4; 971 struct flowi4 fl4;
@@ -978,6 +978,46 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
978 ip_rt_put(rt); 978 ip_rt_put(rt);
979 } 979 }
980} 980}
981
/*
 * Handle a PMTU event for a connected socket: try to refresh the
 * socket's cached route so the new MTU takes effect immediately,
 * instead of the stale cached route being reused until disconnect.
 * If the socket is owned by the user (or has no cached route) we
 * cannot swap the cached dst here, so fall back to the plain
 * per-route update via __ipv4_sk_update_pmtu().
 */
982void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
983{
984	const struct iphdr *iph = (const struct iphdr *) skb->data;
985	struct flowi4 fl4;
986	struct rtable *rt;
987	struct dst_entry *dst;
988
989	bh_lock_sock(sk);	/* serialize with other BH-context socket users */
990	rt = (struct rtable *) __sk_dst_get(sk);
991
992	if (sock_owned_by_user(sk) || !rt) {
993		__ipv4_sk_update_pmtu(skb, sk, mtu);	/* can't touch the cached dst now */
994		goto out;
995	}
996
997	__build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
998
999	if (!__sk_dst_check(sk, 0)) {	/* cached route already invalid? re-resolve */
1000		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1001		if (IS_ERR(rt))
1002			goto out;
1003	}
1004
1005	__ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);	/* NOTE(review): applies the MTU to rt's path dst — confirm intent */
1006
1007	dst = dst_check(&rt->dst, 0);	/* the PMTU update may have obsoleted rt */
1008	if (!dst) {
1009		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1010		if (IS_ERR(rt))
1011			goto out;
1012
1013		dst = &rt->dst;
1014	}
1015
1016	__sk_dst_set(sk, dst);	/* cache the (possibly new) route on the socket */
1017
1018out:
1019	bh_unlock_sock(sk);
1020}
981EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu); 1021EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
982 1022
983void ipv4_redirect(struct sk_buff *skb, struct net *net, 1023void ipv4_redirect(struct sk_buff *skb, struct net *net,