author     Eric Dumazet <edumazet@google.com>     2014-09-22 16:19:44 -0400
committer  David S. Miller <davem@davemloft.net>  2014-09-22 16:27:10 -0400
commit     fcdd1cf4dd63aecf86c987d7f4ec7187be5c2fbc (patch)
tree       9f74f24f8fe931ffac65805a30bf7e53de7e89b1 /net
parent     35f7aa5309c048bb70e58571942795fa9411ce6a (diff)
tcp: avoid possible arithmetic overflows
icsk_rto is a 32-bit field, and icsk_backoff can reach 15 by default,
or more if some sysctls (e.g. tcp_retries2) are changed.
Better to use 64-bit arithmetic to perform the icsk_rto << icsk_backoff operations.
As Joe Perches suggested, add a helper for this.
Yuchung spotted the tcp_v4_err() case.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
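
The helper mentioned above, inet_csk_rto_backoff(), is added in include/net/inet_connection_sock.h and therefore does not appear in the diffstat below, which is limited to 'net'. A minimal sketch of the behavior the callers rely on (widen to 64 bits before shifting, then clamp):

```c
/* Sketch of the helper the hunks below call; the actual definition
 * lives outside net/, so it is not part of this diff.
 */
static inline unsigned long
inet_csk_rto_backoff(const struct inet_connection_sock *icsk,
		     unsigned long max_when)
{
	/* icsk_rto is only 32 bits and icsk_backoff can exceed 15, so
	 * do the shift in 64-bit arithmetic before clamping.
	 */
	u64 when = (u64)icsk->icsk_rto << icsk->icsk_backoff;

	return (unsigned long)min_t(u64, when, max_when);
}
```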
Diffstat (limited to 'net')
-rw-r--r--  net/ipv4/tcp_input.c   5
-rw-r--r--  net/ipv4/tcp_ipv4.c    6
-rw-r--r--  net/ipv4/tcp_output.c  13
-rw-r--r--  net/ipv4/tcp_timer.c   4
4 files changed, 14 insertions, 14 deletions
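
To see why the old expression was fragile, consider a standalone userspace demonstration (not kernel code; the jiffies values are hypothetical and assume HZ=1000):

```c
#include <stdint.h>
#include <stdio.h>

#define TCP_RTO_MAX 120000U	/* 120 s in jiffies, assuming HZ=1000 */

int main(void)
{
	uint32_t rto = 65536;      /* ~65 s RTO, well under TCP_RTO_MAX */
	unsigned int backoff = 16; /* reachable once tcp_retries2 is raised */

	/* The old 32-bit shift wraps: 65536 << 16 == 2^32 truncates to 0,
	 * so min(0, TCP_RTO_MAX) would arm the probe timer with no delay.
	 */
	uint32_t wrapped = rto << backoff;
	printf("32-bit shift: %u -> timer %u jiffies\n",
	       wrapped, wrapped < TCP_RTO_MAX ? wrapped : TCP_RTO_MAX);

	/* Widening first preserves the value, so the clamp to TCP_RTO_MAX
	 * does what the caller intended.
	 */
	uint64_t wide = (uint64_t)rto << backoff;
	printf("64-bit shift: %llu -> timer %llu jiffies\n",
	       (unsigned long long)wide,
	       (unsigned long long)(wide < TCP_RTO_MAX ? wide : TCP_RTO_MAX));
	return 0;
}
```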
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 02fb66d4a018..13f3da4762e3 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3208,9 +3208,10 @@ static void tcp_ack_probe(struct sock *sk)
 	 * This function is not for random using!
 	 */
 	} else {
+		unsigned long when = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
+
 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
-					  min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
-					  TCP_RTO_MAX);
+					  when, TCP_RTO_MAX);
 	}
 }
 
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 006b045716d8..3b2e49cb2b61 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -430,9 +430,9 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 			break;
 
 		icsk->icsk_backoff--;
-		inet_csk(sk)->icsk_rto = (tp->srtt_us ? __tcp_set_rto(tp) :
-			TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
-		tcp_bound_rto(sk);
+		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
+					       TCP_TIMEOUT_INIT;
+		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
 
 		skb = tcp_write_queue_head(sk);
 		BUG_ON(!skb);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 7f1280dcad57..8c61a7c0c889 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -3279,6 +3279,7 @@ void tcp_send_probe0(struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
+	unsigned long probe_max;
 	int err;
 
 	err = tcp_write_wakeup(sk);
@@ -3294,9 +3295,7 @@ void tcp_send_probe0(struct sock *sk)
 		if (icsk->icsk_backoff < sysctl_tcp_retries2)
 			icsk->icsk_backoff++;
 		icsk->icsk_probes_out++;
-		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
-					  min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
-					  TCP_RTO_MAX);
+		probe_max = TCP_RTO_MAX;
 	} else {
 		/* If packet was not sent due to local congestion,
 		 * do not backoff and do not remember icsk_probes_out.
@@ -3306,11 +3305,11 @@ void tcp_send_probe0(struct sock *sk)
 		 */
 		if (!icsk->icsk_probes_out)
 			icsk->icsk_probes_out = 1;
-		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
-					  min(icsk->icsk_rto << icsk->icsk_backoff,
-					      TCP_RESOURCE_PROBE_INTERVAL),
-					  TCP_RTO_MAX);
+		probe_max = TCP_RESOURCE_PROBE_INTERVAL;
 	}
+	inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
+				  inet_csk_rto_backoff(icsk, probe_max),
+				  TCP_RTO_MAX);
 }
 
 int tcp_rtx_synack(struct sock *sk, struct request_sock *req)
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index a339e7ba05a4..b24360f6e293 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -180,7 +180,7 @@ static int tcp_write_timeout(struct sock *sk)
 
 	retry_until = sysctl_tcp_retries2;
 	if (sock_flag(sk, SOCK_DEAD)) {
-		const int alive = (icsk->icsk_rto < TCP_RTO_MAX);
+		const int alive = icsk->icsk_rto < TCP_RTO_MAX;
 
 		retry_until = tcp_orphan_retries(sk, alive);
 		do_reset = alive ||
@@ -294,7 +294,7 @@ static void tcp_probe_timer(struct sock *sk)
 	max_probes = sysctl_tcp_retries2;
 
 	if (sock_flag(sk, SOCK_DEAD)) {
-		const int alive = ((icsk->icsk_rto << icsk->icsk_backoff) < TCP_RTO_MAX);
+		const int alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;
 
 		max_probes = tcp_orphan_retries(sk, alive);
 