author		Eric Dumazet <eric.dumazet@gmail.com>	2010-04-08 19:03:29 -0400
committer	David S. Miller <davem@davemloft.net>	2010-04-13 04:41:33 -0400
commit		b6c6712a42ca3f9fa7f4a3d7c40e3a9dd1fd9e03 (patch)
tree		42032b4978874e8ffcf6c851d13324b8c8c7c113 /net/ipv4/tcp_timer.c
parent		7a161ea92471087a1579239d7a58dd06eaa5601c (diff)
net: sk_dst_cache RCUification
With the latest CONFIG_PROVE_RCU infrastructure, I felt comfortable enough to make this change.
sk->sk_dst_cache is currently protected by an rwlock (sk_dst_lock).
This rwlock is read-locked for a very small amount of time, and dst
entries are already freed after an RCU grace period. This calls for RCU
again :)
This patch converts sk_dst_lock to a spinlock and uses RCU for readers.
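As a rough illustration of the write side after this change (a sketch, not the exact patch), the new dst is published with rcu_assign_pointer() under the now-spinlock sk_dst_lock, and the old entry is simply released, relying on the fact that dst freeing is already RCU-deferred:

	/* Illustrative sketch, not the exact patch: sk_dst_lock is now a
	 * plain spinlock that serializes writers only, and the new dst is
	 * published with rcu_assign_pointer() so lockless readers never
	 * see a half-initialized entry.
	 */
	static inline void
	sk_dst_set(struct sock *sk, struct dst_entry *dst)
	{
		struct dst_entry *old_dst;

		spin_lock(&sk->sk_dst_lock);
		old_dst = sk->sk_dst_cache;
		rcu_assign_pointer(sk->sk_dst_cache, dst);
		spin_unlock(&sk->sk_dst_lock);
		dst_release(old_dst);	/* freeing is RCU-deferred already */
	}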
__sk_dst_get() is supposed to be called under rcu_read_lock() or with the
socket locked by the user, so the appropriate rcu_dereference_check()
condition is used: (rcu_read_lock_held() || sock_owned_by_user(sk)).
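A minimal sketch of the read side matching that description (the real helper lives in include/net/sock.h):

	/* Lockless read side: the caller must either be in an RCU read-side
	 * critical section or own the socket lock; rcu_dereference_check()
	 * lets CONFIG_PROVE_RCU verify exactly that condition.
	 */
	static inline struct dst_entry *
	__sk_dst_get(struct sock *sk)
	{
		return rcu_dereference_check(sk->sk_dst_cache,
					     rcu_read_lock_held() ||
					     sock_owned_by_user(sk));
	}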
This patch avoids two atomic ops per tx packet on connected UDP sockets,
for example, and allows sk_dst_lock to be dirtied much less often.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_timer.c')
-rw-r--r--	net/ipv4/tcp_timer.c	4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 8a0ab2977f1f..c732be00606b 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -172,14 +172,14 @@ static int tcp_write_timeout(struct sock *sk)
 
 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
 		if (icsk->icsk_retransmits)
-			dst_negative_advice(&sk->sk_dst_cache, sk);
+			dst_negative_advice(sk);
 		retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
 	} else {
 		if (retransmits_timed_out(sk, sysctl_tcp_retries1)) {
 			/* Black hole detection */
 			tcp_mtu_probing(icsk, sk);
 
-			dst_negative_advice(&sk->sk_dst_cache, sk);
+			dst_negative_advice(sk);
 		}
 
 		retry_until = sysctl_tcp_retries2;