 include/net/tcp.h    | 18 ++++++++++++++++++
 net/ipv4/tcp_timer.c | 11 +++++++----
 2 files changed, 25 insertions(+), 4 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 54f212ce8aaf..e5319495f15e 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1252,6 +1252,24 @@ static inline struct sk_buff *tcp_write_queue_prev(struct sock *sk, struct sk_bu
 #define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
 
+static inline bool retransmits_timed_out(const struct sock *sk,
+					 unsigned int boundary)
+{
+	int limit, K;
+	if (!inet_csk(sk)->icsk_retransmits)
+		return false;
+
+	K = ilog2(TCP_RTO_MAX/TCP_RTO_MIN);
+
+	if (boundary <= K)
+		limit = ((2 << boundary) - 1) * TCP_RTO_MIN;
+	else
+		limit = ((2 << K) - 1) * TCP_RTO_MIN +
+			(boundary - K) * TCP_RTO_MAX;
+
+	return (tcp_time_stamp - tcp_sk(sk)->retrans_stamp) >= limit;
+}
+
 static inline struct sk_buff *tcp_send_head(struct sock *sk)
 {
	return sk->sk_send_head;
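For reference, a minimal userspace sketch of the same limit computation, assuming the common defaults HZ = 1000, TCP_RTO_MIN = HZ/5 (200 ms) and TCP_RTO_MAX = 120*HZ (120 s). The constants mirror the kernel names but are redefined locally, and flog2()/timeout_limit() are illustrative stand-ins, not kernel functions:

/* Standalone sketch (not kernel code): evaluates the limit used by
 * retransmits_timed_out() for a given boundary.  ilog2() is replaced
 * by a plain integer floor-log2. */
#include <stdio.h>

#define HZ		1000
#define TCP_RTO_MIN	(HZ / 5)	/* 200 ms */
#define TCP_RTO_MAX	(120 * HZ)	/* 120 s  */

static int flog2(unsigned int v)	/* floor(log2(v)), stand-in for ilog2() */
{
	int r = -1;

	while (v) {
		v >>= 1;
		r++;
	}
	return r;
}

static int timeout_limit(unsigned int boundary)
{
	int limit, K = flog2(TCP_RTO_MAX / TCP_RTO_MIN);	/* K = 9 here */

	if (boundary <= K)
		limit = ((2 << boundary) - 1) * TCP_RTO_MIN;
	else
		limit = ((2 << K) - 1) * TCP_RTO_MIN +
			(boundary - K) * TCP_RTO_MAX;
	return limit;
}

int main(void)
{
	/* boundary 15 (the default tcp_retries2) -> 924600 ms, ~15.4 min */
	printf("%d ms\n", timeout_limit(15));
	return 0;
}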
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 408fa4b7b9ba..cdb2ca7684d4 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -137,13 +137,14 @@ static int tcp_write_timeout(struct sock *sk)
 {
	struct inet_connection_sock *icsk = inet_csk(sk);
	int retry_until;
+	bool do_reset;
 
	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		if (icsk->icsk_retransmits)
			dst_negative_advice(&sk->sk_dst_cache);
		retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
	} else {
-		if (icsk->icsk_retransmits >= sysctl_tcp_retries1) {
+		if (retransmits_timed_out(sk, sysctl_tcp_retries1)) {
			/* Black hole detection */
			tcp_mtu_probing(icsk, sk);
 
@@ -155,13 +156,15 @@ static int tcp_write_timeout(struct sock *sk)
			const int alive = (icsk->icsk_rto < TCP_RTO_MAX);
 
			retry_until = tcp_orphan_retries(sk, alive);
+			do_reset = alive ||
+				   !retransmits_timed_out(sk, retry_until);
 
-		if (tcp_out_of_resources(sk, alive || icsk->icsk_retransmits < retry_until))
+			if (tcp_out_of_resources(sk, do_reset))
				return 1;
		}
	}
 
-	if (icsk->icsk_retransmits >= retry_until) {
+	if (retransmits_timed_out(sk, retry_until)) {
		/* Has it gone just too far? */
		tcp_write_err(sk);
		return 1;
@@ -385,7 +388,7 @@ void tcp_retransmit_timer(struct sock *sk)
 out_reset_timer:
	icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
-	if (icsk->icsk_retransmits > sysctl_tcp_retries1)
+	if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1))
		__sk_dst_reset(sk);
 
 out:;
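Assuming the same default constants as the sketch above, the boundaries used by these call sites map to fixed time thresholds: with sysctl_tcp_retries1 at its default of 3, the black-hole-detection check in tcp_write_timeout() fires after roughly 3 s, the "+ 1" variant used for __sk_dst_reset() in tcp_retransmit_timer() after roughly 6.2 s, and an established, non-orphaned socket (retry_until = tcp_retries2, default 15) is aborted after roughly 924.6 s.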