author	Eric Dumazet <edumazet@google.com>	2014-06-30 04:26:23 -0400
committer	David S. Miller <davem@davemloft.net>	2014-07-01 02:40:58 -0400
commit	7f502361531e9eecb396cf99bdc9e9a59f7ebd7f (patch)
tree	6aeca5011b3a7aaedc275e8680407bc7e77b714d /net/ipv4/route.c
parent	dba63115ce0c888fcb4cdec3f8a4ba97d144afaf (diff)
ipv4: irq safe sk_dst_[re]set() and ipv4_sk_update_pmtu() fix
We have two different ways to handle changes to sk->sk_dst.

The first way (used by TCP) assumes the socket lock is owned by the caller
and uses no extra lock: __sk_dst_set() & __sk_dst_reset().

The other way (used by UDP) uses sk_dst_lock, because the socket lock is not
always taken. Note that sk_dst_lock is not softirq safe.

These two ways are not interchangeable for a given socket type.

ipv4_sk_update_pmtu(), added in linux-3.8, introduced a race: it used the
socket lock as synchronization, but its users might be UDP sockets.

Instead of converting sk_dst_lock to a softirq safe version, use xchg(),
as we did for sk_rx_dst in commit e47eb5dfb296b ("udp: ipv4: do not use
sk_dst_lock from softirq context").

In a follow-up patch, we can probably remove sk_dst_lock, as it is only
used in IPv6.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Steffen Klassert <steffen.klassert@secunet.com>
Fixes: 9cb3a50c5f63e ("ipv4: Invalidate the socket cached route on pmtu events if possible")
Signed-off-by: David S. Miller <davem@davemloft.net>
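For context, the lockless accessors this patch switches ipv4_sk_update_pmtu()
over to look roughly like the sketch below. This is a simplified rendering of
the sk_dst_get()/sk_dst_set() helpers from include/net/sock.h of that era, not
verbatim kernel source (RCU/sparse annotations and sk_tx_queue_clear() are
elided). The point is that sk_dst_set() publishes the new route with an atomic
xchg(), so it needs neither the socket lock nor sk_dst_lock and is safe from
softirq context, while sk_dst_get() takes its own reference, which is why the
patched function must dst_release(odst) before returning.

/* Simplified sketch of the lockless sk_dst accessors; not verbatim source. */
static inline struct dst_entry *sk_dst_get(struct sock *sk)
{
	struct dst_entry *dst;

	rcu_read_lock();
	dst = rcu_dereference(sk->sk_dst_cache);
	/* Only hand the entry back if we managed to grab a reference;
	 * the caller is then responsible for dst_release().
	 */
	if (dst && !atomic_inc_not_zero(&dst->__refcnt))
		dst = NULL;
	rcu_read_unlock();
	return dst;
}

static inline void sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	/* Atomic swap of the cached route: no sk_dst_lock and no socket
	 * lock required, hence safe even from softirq context (e.g. a UDP
	 * socket hitting a PMTU event).
	 */
	old_dst = xchg(&sk->sk_dst_cache, dst);
	dst_release(old_dst);
}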
Diffstat (limited to 'net/ipv4/route.c')
-rw-r--r--	net/ipv4/route.c	15
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 082239ffe34a..3162ea923ded 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1010,7 +1010,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
 	const struct iphdr *iph = (const struct iphdr *) skb->data;
 	struct flowi4 fl4;
 	struct rtable *rt;
-	struct dst_entry *dst;
+	struct dst_entry *odst = NULL;
 	bool new = false;
 
 	bh_lock_sock(sk);
@@ -1018,16 +1018,17 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
 	if (!ip_sk_accept_pmtu(sk))
 		goto out;
 
-	rt = (struct rtable *) __sk_dst_get(sk);
+	odst = sk_dst_get(sk);
 
-	if (sock_owned_by_user(sk) || !rt) {
+	if (sock_owned_by_user(sk) || !odst) {
 		__ipv4_sk_update_pmtu(skb, sk, mtu);
 		goto out;
 	}
 
 	__build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
 
-	if (!__sk_dst_check(sk, 0)) {
+	rt = (struct rtable *)odst;
+	if (odst->obsolete && odst->ops->check(odst, 0) == NULL) {
 		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
 		if (IS_ERR(rt))
 			goto out;
@@ -1037,8 +1038,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
 
 	__ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);
 
-	dst = dst_check(&rt->dst, 0);
-	if (!dst) {
+	if (!dst_check(&rt->dst, 0)) {
 		if (new)
 			dst_release(&rt->dst);
 
@@ -1050,10 +1050,11 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
 	}
 
 	if (new)
-		__sk_dst_set(sk, &rt->dst);
+		sk_dst_set(sk, &rt->dst);
 
 out:
 	bh_unlock_sock(sk);
+	dst_release(odst);
 }
 EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
 