Diffstat (limited to 'net/ipv4/tcp_timer.c')
 net/ipv4/tcp_timer.c | 53 ++++++++++++++++++++++++++++++-----------------------
1 files changed, 30 insertions, 23 deletions
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 74c54b30600f..ecd44b0c45f1 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -140,10 +140,10 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
  */
 static bool retransmits_timed_out(struct sock *sk,
				  unsigned int boundary,
+				  unsigned int timeout,
				  bool syn_set)
 {
-	unsigned int timeout, linear_backoff_thresh;
-	unsigned int start_ts;
+	unsigned int linear_backoff_thresh, start_ts;
	unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;
 
	if (!inet_csk(sk)->icsk_retransmits)
@@ -154,14 +154,15 @@ static bool retransmits_timed_out(struct sock *sk,
	else
		start_ts = tcp_sk(sk)->retrans_stamp;
 
-	linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);
-
-	if (boundary <= linear_backoff_thresh)
-		timeout = ((2 << boundary) - 1) * rto_base;
-	else
-		timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
-			(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
+	if (likely(timeout == 0)) {
+		linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);
 
+		if (boundary <= linear_backoff_thresh)
+			timeout = ((2 << boundary) - 1) * rto_base;
+		else
+			timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
+				(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
+	}
	return (tcp_time_stamp - start_ts) >= timeout;
 }
 
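
The likely(timeout == 0) fast path above keeps computing the same default deadline as before; a caller-supplied timeout (e.g. a TCP_USER_TIMEOUT value) simply bypasses it. As a standalone sketch (not part of the patch, assuming the usual TCP_RTO_MIN of 200 ms and TCP_RTO_MAX of 120 s for the non-SYN case), the conventional tcp_retries2 boundary of 15 works out to roughly 924.6 seconds before tcp_write_err():

/*
 * Standalone illustration (not from the kernel tree) of the default
 * timeout retransmits_timed_out() computes when its timeout argument
 * is 0.  Constants mirror TCP_RTO_MIN (200 ms) and TCP_RTO_MAX (120 s).
 */
#include <math.h>
#include <stdio.h>

static double default_timeout_secs(unsigned int boundary)
{
	const double rto_base = 0.2;	/* TCP_RTO_MIN, non-SYN case */
	const double rto_max = 120.0;	/* TCP_RTO_MAX */
	unsigned int thresh = (unsigned int)log2(rto_max / rto_base);	/* ilog2(600) == 9 */

	if (boundary <= thresh)
		return ((2 << boundary) - 1) * rto_base;
	return ((2 << thresh) - 1) * rto_base +
	       (boundary - thresh) * rto_max;
}

int main(void)
{
	/* boundary 15 (the common tcp_retries2 default) -> prints "924.6 s" */
	printf("%.1f s\n", default_timeout_secs(15));
	return 0;
}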
@@ -178,7 +179,7 @@ static int tcp_write_timeout(struct sock *sk)
		retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
		syn_set = 1;
	} else {
-		if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0)) {
+		if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) {
			/* Black hole detection */
			tcp_mtu_probing(icsk, sk);
 
@@ -191,14 +192,15 @@ static int tcp_write_timeout(struct sock *sk)
 
			retry_until = tcp_orphan_retries(sk, alive);
			do_reset = alive ||
-				   !retransmits_timed_out(sk, retry_until, 0);
+				   !retransmits_timed_out(sk, retry_until, 0, 0);
 
			if (tcp_out_of_resources(sk, do_reset))
				return 1;
		}
	}
 
-	if (retransmits_timed_out(sk, retry_until, syn_set)) {
+	if (retransmits_timed_out(sk, retry_until,
+				  syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
		/* Has it gone just too far? */
		tcp_write_err(sk);
		return 1;
@@ -257,7 +259,6 @@ static void tcp_delack_timer(unsigned long data)
		tcp_send_ack(sk);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
	}
-	TCP_CHECK_TIMER(sk);
 
 out:
	if (tcp_memory_pressure)
@@ -365,18 +366,19 @@ void tcp_retransmit_timer(struct sock *sk)
	if (icsk->icsk_retransmits == 0) {
		int mib_idx;
 
-		if (icsk->icsk_ca_state == TCP_CA_Disorder) {
-			if (tcp_is_sack(tp))
-				mib_idx = LINUX_MIB_TCPSACKFAILURES;
-			else
-				mib_idx = LINUX_MIB_TCPRENOFAILURES;
-		} else if (icsk->icsk_ca_state == TCP_CA_Recovery) {
+		if (icsk->icsk_ca_state == TCP_CA_Recovery) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
			else
				mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
		} else if (icsk->icsk_ca_state == TCP_CA_Loss) {
			mib_idx = LINUX_MIB_TCPLOSSFAILURES;
+		} else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
+			   tp->sacked_out) {
+			if (tcp_is_sack(tp))
+				mib_idx = LINUX_MIB_TCPSACKFAILURES;
+			else
+				mib_idx = LINUX_MIB_TCPRENOFAILURES;
		} else {
			mib_idx = LINUX_MIB_TCPTIMEOUTS;
		}
@@ -440,7 +442,7 @@ out_reset_timer:
		icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
	}
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
-	if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0))
+	if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0, 0))
		__sk_dst_reset(sk);
 
 out:;
@@ -478,7 +480,6 @@ static void tcp_write_timer(unsigned long data)
		tcp_probe_timer(sk);
		break;
	}
-	TCP_CHECK_TIMER(sk);
 
 out:
	sk_mem_reclaim(sk);
@@ -560,7 +561,14 @@ static void tcp_keepalive_timer (unsigned long data)
	elapsed = keepalive_time_elapsed(tp);
 
	if (elapsed >= keepalive_time_when(tp)) {
-		if (icsk->icsk_probes_out >= keepalive_probes(tp)) {
+		/* If the TCP_USER_TIMEOUT option is enabled, use that
+		 * to determine when to timeout instead.
+		 */
+		if ((icsk->icsk_user_timeout != 0 &&
+		    elapsed >= icsk->icsk_user_timeout &&
+		    icsk->icsk_probes_out > 0) ||
+		    (icsk->icsk_user_timeout == 0 &&
+		    icsk->icsk_probes_out >= keepalive_probes(tp))) {
			tcp_send_active_reset(sk, GFP_ATOMIC);
			tcp_write_err(sk);
			goto out;
@@ -579,7 +587,6 @@ static void tcp_keepalive_timer (unsigned long data)
		elapsed = keepalive_time_when(tp) - elapsed;
	}
 
-	TCP_CHECK_TIMER(sk);
	sk_mem_reclaim(sk);
 
 resched:
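
The icsk_user_timeout field consulted above is set from userspace through the TCP_USER_TIMEOUT socket option: an unsigned int number of milliseconds that transmitted data may remain unacknowledged (or, as in the keepalive hunk, that probes may go unanswered) before the connection is aborted, with 0 keeping the default retransmission and keepalive-probe limits. A minimal, illustrative setter, not part of this patch:

/*
 * Illustrative userspace sketch: enable TCP_USER_TIMEOUT on a socket.
 * Older libc headers may not define the option; the value 18 comes
 * from include/linux/tcp.h.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

#ifndef TCP_USER_TIMEOUT
#define TCP_USER_TIMEOUT 18
#endif

int main(void)
{
	unsigned int timeout_ms = 30 * 1000;	/* give up after ~30 s without ACKs */
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	if (setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT,
		       &timeout_ms, sizeof(timeout_ms)) < 0)
		perror("setsockopt(TCP_USER_TIMEOUT)");
	close(fd);
	return 0;
}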