author     Eric Dumazet <edumazet@google.com>      2017-12-10 20:55:03 -0500
committer  David S. Miller <davem@davemloft.net>   2017-12-12 10:53:04 -0500
commit     607065bad9931e72207b0cac365d7d4abc06bd99
tree       d18808d1c3bf46dd8a899c3877257ea8c4961b61
parent     02db55718d53f9d426cee504c27fb768e9ed4ffe
tcp: avoid integer overflows in tcp_rcv_space_adjust()
When using large tcp_rmem[2] values (I did tests with 500 MB),
I noticed overflows while computing rcvwin.

Let's fix this before the following patch.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Acked-by: Wei Wang <weiwan@google.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
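To make the overflow concrete, here is a minimal userspace sketch (not kernel
code, and not part of this patch). The copied, advmss, and rcvmem values are
assumptions chosen to mirror the reported 500 MB test; the old all-int
arithmetic wraps past INT_MAX, while widening to 64 bits as the patch does
keeps the intermediate product in range:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Hypothetical inputs: ~500 MB copied per measurement window,
         * a common advertised MSS, and an assumed per-segment truesize.
         */
        int copied = 500 * 1024 * 1024;  /* 524288000 */
        int advmss = 1460;
        int rcvmem = 4480;

        /* Old arithmetic, all 32-bit. The final multiply is done as
         * uint32_t here to show the wraparound without signed-overflow
         * undefined behavior; the product exceeds INT_MAX and reads
         * back as a negative int.
         */
        int rcvwin32 = (copied << 1) + 16 * advmss;
        uint32_t bad = (uint32_t)(rcvwin32 / advmss) * (uint32_t)rcvmem;

        /* New arithmetic: widen to 64 bits before the shift/multiply. */
        uint64_t rcvwin64 = ((uint64_t)copied << 1) + 16 * advmss;
        uint64_t good = rcvwin64 / advmss * (uint64_t)rcvmem;

        printf("32-bit rcvbuf: %d\n", (int)bad);   /* about -1.08e9 */
        printf("64-bit rcvbuf: %llu\n", (unsigned long long)good);
        return 0;
    }

With these inputs the wrapped value is negative, so the min() against
tcp_rmem[2] and the rcvbuf > sk->sk_rcvbuf comparison in
tcp_rcv_space_adjust() operate on garbage and receive-buffer autotuning
misbehaves.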
 include/linux/tcp.h  |  2 +-
 net/ipv4/tcp_input.c | 12 +++++++-----
 2 files changed, 8 insertions(+), 6 deletions(-)
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index ca4a6361389b..4f93f0953c41 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -344,7 +344,7 @@ struct tcp_sock {
 
 /* Receiver queue space */
 	struct {
-		int	space;
+		u32	space;
 		u32	seq;
 		u64	time;
 	} rcvq_space;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 746a6773c482..2900e58738cd 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -576,8 +576,8 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
 void tcp_rcv_space_adjust(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
+	u32 copied;
 	int time;
-	int copied;
 
 	tcp_mstamp_refresh(tp);
 	time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time);
@@ -600,12 +600,13 @@ void tcp_rcv_space_adjust(struct sock *sk)
 
 	if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf &&
 	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
-		int rcvwin, rcvmem, rcvbuf;
+		int rcvmem, rcvbuf;
+		u64 rcvwin;
 
 		/* minimal window to cope with packet losses, assuming
 		 * steady state. Add some cushion because of small variations.
 		 */
-		rcvwin = (copied << 1) + 16 * tp->advmss;
+		rcvwin = ((u64)copied << 1) + 16 * tp->advmss;
 
 		/* If rate increased by 25%,
 		 *	assume slow start, rcvwin = 3 * copied
@@ -625,8 +626,9 @@ void tcp_rcv_space_adjust(struct sock *sk)
 		while (tcp_win_from_space(sk, rcvmem) < tp->advmss)
 			rcvmem += 128;
 
-		rcvbuf = min(rcvwin / tp->advmss * rcvmem,
-			     sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
+		do_div(rcvwin, tp->advmss);
+		rcvbuf = min_t(u64, rcvwin * rcvmem,
+			       sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
 		if (rcvbuf > sk->sk_rcvbuf) {
 			sk->sk_rcvbuf = rcvbuf;
 
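A note on the new arithmetic: do_div() is used because the kernel does not
link the compiler's 64-bit division helpers on 32-bit architectures, so a
u64 cannot simply be divided with the / operator there; the macro divides
its first argument in place and returns the remainder. Dividing rcvwin by
advmss before multiplying by rcvmem also keeps the product well within u64,
and min_t(u64, ...) forces the clamp against tcp_rmem[2] to happen in 64
bits before the result is assigned back to the int rcvbuf. Roughly, as a
userspace sketch of the macro's semantics (do_div_sketch is a hypothetical
stand-in, not the kernel implementation):

    #include <stdint.h>

    /* Approximates kernel do_div(): the dividend is replaced by the
     * quotient in place and the remainder is returned.  The real macro
     * operates directly on a u64 lvalue rather than through a pointer.
     */
    static uint32_t do_div_sketch(uint64_t *n, uint32_t base)
    {
        uint32_t rem = (uint32_t)(*n % base);
        *n /= base;
        return rem;
    }

    /* Usage mirroring the patched code:
     *   uint64_t rcvwin = 1048599360;
     *   do_div_sketch(&rcvwin, 1460);   // rcvwin is now 718218
     */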