author     Eric Dumazet <dada1@cosmosbay.com>     2007-12-21 04:50:43 -0500
committer  David S. Miller <davem@davemloft.net>  2008-01-28 17:59:58 -0500
commit     ce55dd3610f7ac29bf8d159c2e2ace9aaf2c3038
tree       d5187da8b1723c5b815337b2c3814d776354e220 /net/ipv4/tcp_timer.c
parent     b790cedd24a7f7d1639072b3faf35f1f56cb38ea
[TCP]: tcp_write_timeout() cleanup
Before submitting a patch to change a divide to a right shift, I felt it
necessary to create a helper function, tcp_mtu_probing(), to reduce the
length of lines in tcp_write_timeout() that exceed 100 characters.
Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
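For context, the follow-up change mentioned in the commit message would presumably replace the divide by two inside the new helper with a right shift by one. A minimal, standalone sketch (not kernel code; the sample MSS values are made up for the demo) showing why that substitution is safe for the non-negative values involved:

/*
 * Illustrative userspace check, not part of this patch: for non-negative
 * MSS values, dividing by two and shifting right by one give the same
 * result, which is what would make the later divide-to-shift change safe.
 */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	const int sample_mss[] = { 68, 512, 536, 1400, 9000 };
	const unsigned int n = sizeof(sample_mss) / sizeof(sample_mss[0]);

	for (unsigned int i = 0; i < n; i++)
		assert((sample_mss[i] / 2) == (sample_mss[i] >> 1));

	printf("divide by two and right shift by one agree for non-negative MSS values\n");
	return 0;
}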
Diffstat (limited to 'net/ipv4/tcp_timer.c')
-rw-r--r--  net/ipv4/tcp_timer.c  35
1 file changed, 21 insertions(+), 14 deletions(-)
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index d8970ecfcfc8..8f1480808f90 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -114,13 +114,31 @@ static int tcp_orphan_retries(struct sock *sk, int alive)
 	return retries;
 }
 
+static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
+{
+	int mss;
+
+	/* Black hole detection */
+	if (sysctl_tcp_mtu_probing) {
+		if (!icsk->icsk_mtup.enabled) {
+			icsk->icsk_mtup.enabled = 1;
+			tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
+		} else {
+			struct tcp_sock *tp = tcp_sk(sk);
+			mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low)/2;
+			mss = min(sysctl_tcp_base_mss, mss);
+			mss = max(mss, 68 - tp->tcp_header_len);
+			icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
+			tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
+		}
+	}
+}
+
 /* A write timeout has occurred. Process the after effects. */
 static int tcp_write_timeout(struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
-	struct tcp_sock *tp = tcp_sk(sk);
 	int retry_until;
-	int mss;
 
 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
 		if (icsk->icsk_retransmits)
@@ -129,18 +147,7 @@ static int tcp_write_timeout(struct sock *sk)
 	} else {
 		if (icsk->icsk_retransmits >= sysctl_tcp_retries1) {
 			/* Black hole detection */
-			if (sysctl_tcp_mtu_probing) {
-				if (!icsk->icsk_mtup.enabled) {
-					icsk->icsk_mtup.enabled = 1;
-					tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
-				} else {
-					mss = min(sysctl_tcp_base_mss,
-						  tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low)/2);
-					mss = max(mss, 68 - tp->tcp_header_len);
-					icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
-					tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
-				}
-			}
+			tcp_mtu_probing(icsk, sk);
 
 			dst_negative_advice(&sk->sk_dst_cache);
 		}
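To make the helper's back-off arithmetic concrete, here is a small userspace sketch of the same halve-then-clamp computation. The function name probe_backoff_mss and the concrete numbers (a 1400-byte MSS derived from search_low, a base MSS of 512, a 20-byte TCP header) are assumptions made for the illustration, not values taken from the patch.

/*
 * Userspace sketch (not kernel code) of what tcp_mtu_probing() computes when
 * MTU probing is already enabled: halve the MSS derived from the current
 * search_low MTU, cap it at the base MSS, and keep it at or above 68 bytes
 * minus the TCP header length.
 */
#include <stdio.h>

static int probe_backoff_mss(int mss_from_search_low, int base_mss, int tcp_header_len)
{
	int mss = mss_from_search_low / 2;	/* back off by halving */

	if (mss > base_mss)			/* mss = min(sysctl_tcp_base_mss, mss) */
		mss = base_mss;
	if (mss < 68 - tcp_header_len)		/* mss = max(mss, 68 - tp->tcp_header_len) */
		mss = 68 - tcp_header_len;
	return mss;
}

int main(void)
{
	/* With the assumed inputs this prints 512: 1400/2 = 700, capped at the base MSS. */
	printf("new search_low MSS: %d\n", probe_backoff_mss(1400, 512, 20));
	return 0;
}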