Diffstat (limited to 'net/ipv4/tcp_timer.c')
 net/ipv4/tcp_timer.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index bcc2f5783e57..c36089aa3515 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -333,7 +333,6 @@ static void tcp_probe_timer(struct sock *sk)
 	struct sk_buff *skb = tcp_send_head(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	int max_probes;
-	u32 start_ts;
 
 	if (tp->packets_out || !skb) {
 		icsk->icsk_probes_out = 0;
@@ -348,12 +347,13 @@ static void tcp_probe_timer(struct sock *sk)
 	 * corresponding system limit. We also implement similar policy when
 	 * we use RTO to probe window in tcp_retransmit_timer().
 	 */
-	start_ts = tcp_skb_timestamp(skb);
-	if (!start_ts)
-		skb->skb_mstamp_ns = tp->tcp_clock_cache;
-	else if (icsk->icsk_user_timeout &&
-		 (s32)(tcp_time_stamp(tp) - start_ts) > icsk->icsk_user_timeout)
-		goto abort;
+	if (icsk->icsk_user_timeout) {
+		u32 elapsed = tcp_model_timeout(sk, icsk->icsk_probes_out,
+						tcp_probe0_base(sk));
+
+		if (elapsed >= icsk->icsk_user_timeout)
+			goto abort;
+	}
 
 	max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
 	if (sock_flag(sk, SOCK_DEAD)) {
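
The removed code aborted the zero-window probe timer by comparing the head skb's timestamp against icsk_user_timeout; the new code instead asks tcp_model_timeout() how much time the icsk_probes_out probes already sent would have consumed, starting from the base interval returned by tcp_probe0_base(sk). The body of tcp_model_timeout() is not part of this diff; the standalone sketch below is only an illustrative model of that style of computation, assuming probe intervals that double from a base value up to a cap (the kernel caps backoff at TCP_RTO_MAX). It is not the kernel implementation.

	/*
	 * Illustrative userspace sketch, not kernel code: estimate the
	 * elapsed time that `probes` window probes would have consumed,
	 * assuming the interval starts at base_ms and doubles each time
	 * until it reaches max_ms, after which it stays at max_ms.
	 */
	#include <stdio.h>

	static unsigned int model_probe_timeout_ms(unsigned int probes,
						   unsigned int base_ms,
						   unsigned int max_ms)
	{
		unsigned int elapsed = 0, interval = base_ms;
		unsigned int i;

		for (i = 0; i < probes; i++) {
			elapsed += interval;
			if (interval < max_ms / 2)
				interval *= 2;		/* exponential backoff */
			else
				interval = max_ms;	/* capped from here on */
		}
		return elapsed;
	}

	int main(void)
	{
		unsigned int probes;

		/* e.g. a 200 ms base interval and a 120 s cap */
		for (probes = 1; probes <= 10; probes++)
			printf("%u probes -> ~%u ms\n", probes,
			       model_probe_timeout_ms(probes, 200, 120000));
		return 0;
	}

With a deterministic model like this, the user-timeout check no longer depends on a timestamp stored in the head skb, which is the simplification the hunk above makes: the abort decision follows purely from how many probes have been sent and the probe timer's base interval.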