path: root/include/net/sock.h
author    Eric Dumazet <edumazet@google.com>    2014-06-30 04:26:23 -0400
committer David S. Miller <davem@davemloft.net> 2014-07-01 02:40:58 -0400
commit    7f502361531e9eecb396cf99bdc9e9a59f7ebd7f (patch)
tree      6aeca5011b3a7aaedc275e8680407bc7e77b714d /include/net/sock.h
parent    dba63115ce0c888fcb4cdec3f8a4ba97d144afaf (diff)
ipv4: irq safe sk_dst_[re]set() and ipv4_sk_update_pmtu() fix
We have two different ways to handle changes to sk->sk_dst.

The first way (used by TCP) assumes the socket lock is owned by the caller
and uses no extra lock: __sk_dst_set() & __sk_dst_reset().

Another way (used by UDP) uses sk_dst_lock, because the socket lock is not
always taken. Note that sk_dst_lock is not softirq safe.

These ways are not interchangeable for a given socket type.

ipv4_sk_update_pmtu(), added in linux-3.8, added a race, as it used the
socket lock as synchronization, but users might be UDP sockets.

Instead of converting sk_dst_lock to a softirq safe version, use xchg()
as we did for sk_rx_dst in commit e47eb5dfb296b ("udp: ipv4: do not use
sk_dst_lock from softirq context").

In a follow-up patch, we probably can remove sk_dst_lock, as it is only
used in IPv6.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Steffen Klassert <steffen.klassert@secunet.com>
Fixes: 9cb3a50c5f63e ("ipv4: Invalidate the socket cached route on pmtu events if possible")
Signed-off-by: David S. Miller <davem@davemloft.net>
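[Editor's illustration, not part of the commit: a minimal userspace sketch of the lockless
pointer-swap pattern the patch adopts, assuming C11 <stdatomic.h> in place of the kernel's
xchg() and a plain reference count in place of dst_hold()/dst_release(). All names here
(struct entry, entry_alloc(), entry_release(), cache_set()) are hypothetical.]

/* Userspace sketch: atomically publish a new cached object, then drop the
 * reference the cache held on the old one.  No spinlock is needed, so the
 * update is safe even when callers race from different contexts. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	atomic_int refcnt;
	int value;
};

static struct entry *entry_alloc(int value)
{
	struct entry *e = malloc(sizeof(*e));

	if (!e)
		abort();
	atomic_init(&e->refcnt, 1);	/* caller's reference */
	e->value = value;
	return e;
}

static void entry_release(struct entry *e)
{
	/* Free once the last reference is gone; NULL is a no-op. */
	if (e && atomic_fetch_sub(&e->refcnt, 1) == 1)
		free(e);
}

/* Analogue of the patched sk_dst_set(): one atomic exchange publishes the
 * new pointer; only this caller sees (and releases) the old pointer. */
static void cache_set(struct entry *_Atomic *cache, struct entry *newp)
{
	struct entry *oldp = atomic_exchange(cache, newp);

	entry_release(oldp);
}

int main(void)
{
	struct entry *_Atomic cache = NULL;

	cache_set(&cache, entry_alloc(1));
	cache_set(&cache, entry_alloc(2));	/* entry 1 is released here */
	printf("cached value: %d\n", atomic_load(&cache)->value);
	cache_set(&cache, NULL);		/* analogue of sk_dst_reset() */
	return 0;
}

[The key property, as in the patch, is that the swap is a single atomic
operation, so concurrent writers each obtain exactly one old pointer and
release it exactly once.]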
Diffstat (limited to 'include/net/sock.h')
-rw-r--r--  include/net/sock.h  12
1 file changed, 6 insertions, 6 deletions
diff --git a/include/net/sock.h b/include/net/sock.h
index 173cae485de1..c556fd9b05ac 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1768,9 +1768,11 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst)
 static inline void
 sk_dst_set(struct sock *sk, struct dst_entry *dst)
 {
-	spin_lock(&sk->sk_dst_lock);
-	__sk_dst_set(sk, dst);
-	spin_unlock(&sk->sk_dst_lock);
+	struct dst_entry *old_dst;
+
+	sk_tx_queue_clear(sk);
+	old_dst = xchg(&sk->sk_dst_cache, dst);
+	dst_release(old_dst);
 }
 
 static inline void
@@ -1782,9 +1784,7 @@ __sk_dst_reset(struct sock *sk)
 static inline void
 sk_dst_reset(struct sock *sk)
 {
-	spin_lock(&sk->sk_dst_lock);
-	__sk_dst_reset(sk);
-	spin_unlock(&sk->sk_dst_lock);
+	sk_dst_set(sk, NULL);
 }
 
 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);