Diffstat (limited to 'net/ipv4/tcp_output.c'):

 -rw-r--r--  net/ipv4/tcp_output.c | 356
 1 file changed, 139 insertions(+), 217 deletions(-)

diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 817fbb396bc8..e787ecec505e 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -65,15 +65,13 @@ int sysctl_tcp_base_mss __read_mostly = TCP_BASE_MSS;
 /* By default, RFC2861 behavior. */
 int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
 
-int sysctl_tcp_cookie_size __read_mostly = 0; /* TCP_COOKIE_MAX */
-EXPORT_SYMBOL_GPL(sysctl_tcp_cookie_size);
-
 static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 			   int push_one, gfp_t gfp);
 
 /* Account for new data that has been sent to the network. */
 static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
 {
+	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	unsigned int prior_packets = tp->packets_out;
 
@@ -85,7 +83,8 @@ static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
 	tp->frto_counter = 3;
 
 	tp->packets_out += tcp_skb_pcount(skb);
-	if (!prior_packets || tp->early_retrans_delayed)
+	if (!prior_packets || icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
+	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
 		tcp_rearm_rto(sk);
 }
 
@@ -384,7 +383,6 @@ static inline bool tcp_urg_mode(const struct tcp_sock *tp)
 #define OPTION_TS		(1 << 1)
 #define OPTION_MD5		(1 << 2)
 #define OPTION_WSCALE		(1 << 3)
-#define OPTION_COOKIE_EXTENSION	(1 << 4)
 #define OPTION_FAST_OPEN_COOKIE	(1 << 8)
 
 struct tcp_out_options {
@@ -398,36 +396,6 @@ struct tcp_out_options {
 	struct tcp_fastopen_cookie *fastopen_cookie;	/* Fast open cookie */
 };
 
-/* The sysctl int routines are generic, so check consistency here.
- */
-static u8 tcp_cookie_size_check(u8 desired)
-{
-	int cookie_size;
-
-	if (desired > 0)
-		/* previously specified */
-		return desired;
-
-	cookie_size = ACCESS_ONCE(sysctl_tcp_cookie_size);
-	if (cookie_size <= 0)
-		/* no default specified */
-		return 0;
-
-	if (cookie_size <= TCP_COOKIE_MIN)
-		/* value too small, specify minimum */
-		return TCP_COOKIE_MIN;
-
-	if (cookie_size >= TCP_COOKIE_MAX)
-		/* value too large, specify maximum */
-		return TCP_COOKIE_MAX;
-
-	if (cookie_size & 1)
-		/* 8-bit multiple, illegal, fix it */
-		cookie_size++;
-
-	return (u8)cookie_size;
-}
-
 /* Write previously computed TCP options to the packet.
  *
  * Beware: Something in the Internet is very sensitive to the ordering of
@@ -446,27 +414,9 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
 {
 	u16 options = opts->options;	/* mungable copy */
 
-	/* Having both authentication and cookies for security is redundant,
-	 * and there's certainly not enough room.  Instead, the cookie-less
-	 * extension variant is proposed.
-	 *
-	 * Consider the pessimal case with authentication.  The options
-	 * could look like:
-	 *   COOKIE|MD5(20) + MSS(4) + SACK|TS(12) + WSCALE(4) == 40
-	 */
 	if (unlikely(OPTION_MD5 & options)) {
-		if (unlikely(OPTION_COOKIE_EXTENSION & options)) {
-			*ptr++ = htonl((TCPOPT_COOKIE << 24) |
-				       (TCPOLEN_COOKIE_BASE << 16) |
-				       (TCPOPT_MD5SIG << 8) |
-				       TCPOLEN_MD5SIG);
-		} else {
-			*ptr++ = htonl((TCPOPT_NOP << 24) |
-				       (TCPOPT_NOP << 16) |
-				       (TCPOPT_MD5SIG << 8) |
-				       TCPOLEN_MD5SIG);
-		}
-		options &= ~OPTION_COOKIE_EXTENSION;
+		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
+			       (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
 		/* overload cookie hash location */
 		opts->hash_location = (__u8 *)ptr;
 		ptr += 4;
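
The single htonl() word retained above packs four TCP option bytes, most
significant byte first: two NOPs for alignment, then the MD5 option's kind
and length. A minimal userspace sketch of that packing, using the values
these macros carry (TCPOPT_NOP = 1, TCPOPT_MD5SIG = 19, TCPOLEN_MD5SIG = 18
per RFC 2385); the demo program is illustrative, not kernel code:

	#include <arpa/inet.h>
	#include <stdint.h>
	#include <stdio.h>

	#define TCPOPT_NOP	1	/* padding */
	#define TCPOPT_MD5SIG	19	/* RFC 2385 */
	#define TCPOLEN_MD5SIG	18	/* kind + length + 16-byte digest */

	int main(void)
	{
		/* NOP, NOP, kind, length -- as tcp_options_write() emits */
		uint32_t word = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				      (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		const uint8_t *b = (const uint8_t *)&word;

		printf("%u %u %u %u\n", b[0], b[1], b[2], b[3]); /* 1 1 19 18 */
		return 0;
	}

Regardless of host endianness, the bytes land on the wire in option order,
which is why the kernel builds options as 32-bit big-endian words.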
@@ -495,44 +445,6 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
 		*ptr++ = htonl(opts->tsecr);
 	}
 
-	/* Specification requires after timestamp, so do it now.
-	 *
-	 * Consider the pessimal case without authentication.  The options
-	 * could look like:
-	 *   MSS(4) + SACK|TS(12) + COOKIE(20) + WSCALE(4) == 40
-	 */
-	if (unlikely(OPTION_COOKIE_EXTENSION & options)) {
-		__u8 *cookie_copy = opts->hash_location;
-		u8 cookie_size = opts->hash_size;
-
-		/* 8-bit multiple handled in tcp_cookie_size_check() above,
-		 * and elsewhere.
-		 */
-		if (0x2 & cookie_size) {
-			__u8 *p = (__u8 *)ptr;
-
-			/* 16-bit multiple */
-			*p++ = TCPOPT_COOKIE;
-			*p++ = TCPOLEN_COOKIE_BASE + cookie_size;
-			*p++ = *cookie_copy++;
-			*p++ = *cookie_copy++;
-			ptr++;
-			cookie_size -= 2;
-		} else {
-			/* 32-bit multiple */
-			*ptr++ = htonl(((TCPOPT_NOP << 24) |
-					(TCPOPT_NOP << 16) |
-					(TCPOPT_COOKIE << 8) |
-					TCPOLEN_COOKIE_BASE) +
-				       cookie_size);
-		}
-
-		if (cookie_size > 0) {
-			memcpy(ptr, cookie_copy, cookie_size);
-			ptr += (cookie_size / 4);
-		}
-	}
-
 	if (unlikely(OPTION_SACK_ADVERTISE & options)) {
 		*ptr++ = htonl((TCPOPT_NOP << 24) |
 			       (TCPOPT_NOP << 16) |
@@ -591,11 +503,7 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
 				struct tcp_md5sig_key **md5)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	struct tcp_cookie_values *cvp = tp->cookie_values;
 	unsigned int remaining = MAX_TCP_OPTION_SPACE;
-	u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ?
-			 tcp_cookie_size_check(cvp->cookie_desired) :
-			 0;
 	struct tcp_fastopen_request *fastopen = tp->fastopen_req;
 
 #ifdef CONFIG_TCP_MD5SIG
@@ -647,52 +555,7 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
 			tp->syn_fastopen = 1;
 		}
 	}
-	/* Note that timestamps are required by the specification.
-	 *
-	 * Odd numbers of bytes are prohibited by the specification, ensuring
-	 * that the cookie is 16-bit aligned, and the resulting cookie pair is
-	 * 32-bit aligned.
-	 */
-	if (*md5 == NULL &&
-	    (OPTION_TS & opts->options) &&
-	    cookie_size > 0) {
-		int need = TCPOLEN_COOKIE_BASE + cookie_size;
-
-		if (0x2 & need) {
-			/* 32-bit multiple */
-			need += 2; /* NOPs */
-
-			if (need > remaining) {
-				/* try shrinking cookie to fit */
-				cookie_size -= 2;
-				need -= 4;
-			}
-		}
-		while (need > remaining && TCP_COOKIE_MIN <= cookie_size) {
-			cookie_size -= 4;
-			need -= 4;
-		}
-		if (TCP_COOKIE_MIN <= cookie_size) {
-			opts->options |= OPTION_COOKIE_EXTENSION;
-			opts->hash_location = (__u8 *)&cvp->cookie_pair[0];
-			opts->hash_size = cookie_size;
-
-			/* Remember for future incarnations. */
-			cvp->cookie_desired = cookie_size;
-
-			if (cvp->cookie_desired != cvp->cookie_pair_size) {
-				/* Currently use random bytes as a nonce,
-				 * assuming these are completely unpredictable
-				 * by hostile users of the same system.
-				 */
-				get_random_bytes(&cvp->cookie_pair[0],
-						 cookie_size);
-				cvp->cookie_pair_size = cookie_size;
-			}
 
-			remaining -= need;
-		}
-	}
 	return MAX_TCP_OPTION_SPACE - remaining;
 }
 
@@ -702,14 +565,10 @@ static unsigned int tcp_synack_options(struct sock *sk,
 				   unsigned int mss, struct sk_buff *skb,
 				   struct tcp_out_options *opts,
 				   struct tcp_md5sig_key **md5,
-				   struct tcp_extend_values *xvp,
 				   struct tcp_fastopen_cookie *foc)
 {
 	struct inet_request_sock *ireq = inet_rsk(req);
 	unsigned int remaining = MAX_TCP_OPTION_SPACE;
-	u8 cookie_plus = (xvp != NULL && !xvp->cookie_out_never) ?
-			 xvp->cookie_plus :
-			 0;
 
 #ifdef CONFIG_TCP_MD5SIG
 	*md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);
@@ -757,28 +616,7 @@ static unsigned int tcp_synack_options(struct sock *sk,
 			remaining -= need;
 		}
 	}
-	/* Similar rationale to tcp_syn_options() applies here, too.
-	 * If the <SYN> options fit, the same options should fit now!
-	 */
-	if (*md5 == NULL &&
-	    ireq->tstamp_ok &&
-	    cookie_plus > TCPOLEN_COOKIE_BASE) {
-		int need = cookie_plus; /* has TCPOLEN_COOKIE_BASE */
-
-		if (0x2 & need) {
-			/* 32-bit multiple */
-			need += 2; /* NOPs */
-		}
-		if (need <= remaining) {
-			opts->options |= OPTION_COOKIE_EXTENSION;
-			opts->hash_size = cookie_plus - TCPOLEN_COOKIE_BASE;
-			remaining -= need;
-		} else {
-			/* There's no error return, so flag it. */
-			xvp->cookie_out_never = 1; /* true */
-			opts->hash_size = 0;
-		}
-	}
+
 	return MAX_TCP_OPTION_SPACE - remaining;
 }
 
@@ -1958,6 +1796,9 @@ static int tcp_mtu_probe(struct sock *sk)
  * snd_up-64k-mss .. snd_up cannot be large. However, taking into
  * account rare use of URG, this is not a big flaw.
  *
+ * Send at most one packet when push_one > 0. Temporarily ignore
+ * cwnd limit to force at most one packet out when push_one == 2.
+
  * Returns true, if no segments are in flight and we have queued segments,
  * but cannot send anything now because of SWS or another problem.
  */
@@ -1993,8 +1834,13 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 			goto repair; /* Skip network transmission */
 
 		cwnd_quota = tcp_cwnd_test(tp, skb);
-		if (!cwnd_quota)
-			break;
+		if (!cwnd_quota) {
+			if (push_one == 2)
+				/* Force out a loss probe pkt. */
+				cwnd_quota = 1;
+			else
+				break;
+		}
 
 		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
 			break;
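
The push_one contract described above (at most one packet when
push_one > 0; push_one == 2 additionally overrides a zero
congestion-window quota so a single loss probe can go out) can be
modeled in a few lines. A toy sketch with assumed names, not kernel
code:

	#include <stdio.h>

	/* Toy model of tcp_write_xmit()'s push_one modes:
	 * 0 = drain the queue, 1 = push a single skb,
	 * 2 = loss probe that may ignore a zero cwnd quota once.
	 */
	static int segments_sent(int queued, int cwnd_quota, int push_one)
	{
		int sent = 0;

		while (queued > 0) {
			if (cwnd_quota == 0) {
				if (push_one == 2 && sent == 0)
					cwnd_quota = 1;	/* force one probe */
				else
					break;
			}
			sent++, queued--, cwnd_quota--;
			if (push_one)
				break;	/* at most one when push_one > 0 */
		}
		return sent;
	}

	int main(void)
	{
		printf("%d\n", segments_sent(5, 3, 0)); /* 3: cwnd-limited */
		printf("%d\n", segments_sent(5, 0, 2)); /* 1: forced probe */
		printf("%d\n", segments_sent(5, 0, 1)); /* 0: respects cwnd */
		return 0;
	}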
@@ -2048,10 +1894,129 @@ repair:
 	if (likely(sent_pkts)) {
 		if (tcp_in_cwnd_reduction(sk))
 			tp->prr_out += sent_pkts;
+
+		/* Send one loss probe per tail loss episode. */
+		if (push_one != 2)
+			tcp_schedule_loss_probe(sk);
 		tcp_cwnd_validate(sk);
 		return false;
 	}
-	return !tp->packets_out && tcp_send_head(sk);
+	return (push_one == 2) || (!tp->packets_out && tcp_send_head(sk));
+}
+
+bool tcp_schedule_loss_probe(struct sock *sk)
+{
+	struct inet_connection_sock *icsk = inet_csk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
+	u32 timeout, tlp_time_stamp, rto_time_stamp;
+	u32 rtt = tp->srtt >> 3;
+
+	if (WARN_ON(icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS))
+		return false;
+	/* No consecutive loss probes. */
+	if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) {
+		tcp_rearm_rto(sk);
+		return false;
+	}
+	/* Don't do any loss probe on a Fast Open connection before 3WHS
+	 * finishes.
+	 */
+	if (sk->sk_state == TCP_SYN_RECV)
+		return false;
+
+	/* TLP is only scheduled when next timer event is RTO. */
+	if (icsk->icsk_pending != ICSK_TIME_RETRANS)
+		return false;
+
+	/* Schedule a loss probe in 2*RTT for SACK capable connections
+	 * in Open state, that are either limited by cwnd or application.
+	 */
+	if (sysctl_tcp_early_retrans < 3 || !rtt || !tp->packets_out ||
+	    !tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open)
+		return false;
+
+	if ((tp->snd_cwnd > tcp_packets_in_flight(tp)) &&
+	     tcp_send_head(sk))
+		return false;
+
+	/* Probe timeout is at least 1.5*rtt + TCP_DELACK_MAX to account
+	 * for delayed ack when there's one outstanding packet.
+	 */
+	timeout = rtt << 1;
+	if (tp->packets_out == 1)
+		timeout = max_t(u32, timeout,
+				(rtt + (rtt >> 1) + TCP_DELACK_MAX));
+	timeout = max_t(u32, timeout, msecs_to_jiffies(10));
+
+	/* If RTO is shorter, just schedule TLP in its place. */
+	tlp_time_stamp = tcp_time_stamp + timeout;
+	rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout;
+	if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) {
+		s32 delta = rto_time_stamp - tcp_time_stamp;
+		if (delta > 0)
+			timeout = delta;
+	}
+
+	inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
+				  TCP_RTO_MAX);
+	return true;
+}
+
+/* When probe timeout (PTO) fires, send a new segment if one exists, else
+ * retransmit the last segment.
+ */
+void tcp_send_loss_probe(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct sk_buff *skb;
+	int pcount;
+	int mss = tcp_current_mss(sk);
+	int err = -1;
+
+	if (tcp_send_head(sk) != NULL) {
+		err = tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
+		goto rearm_timer;
+	}
+
+	/* At most one outstanding TLP retransmission. */
+	if (tp->tlp_high_seq)
+		goto rearm_timer;
+
+	/* Retransmit last segment. */
+	skb = tcp_write_queue_tail(sk);
+	if (WARN_ON(!skb))
+		goto rearm_timer;
+
+	pcount = tcp_skb_pcount(skb);
+	if (WARN_ON(!pcount))
+		goto rearm_timer;
+
+	if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) {
+		if (unlikely(tcp_fragment(sk, skb, (pcount - 1) * mss, mss)))
+			goto rearm_timer;
+		skb = tcp_write_queue_tail(sk);
+	}
+
+	if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
+		goto rearm_timer;
+
+	/* Probe with zero data doesn't trigger fast recovery. */
+	if (skb->len > 0)
+		err = __tcp_retransmit_skb(sk, skb);
+
+	/* Record snd_nxt for loss detection. */
+	if (likely(!err))
+		tp->tlp_high_seq = tp->snd_nxt;
+
+rearm_timer:
+	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+				  inet_csk(sk)->icsk_rto,
+				  TCP_RTO_MAX);
+
+	if (likely(!err))
+		NET_INC_STATS_BH(sock_net(sk),
+				 LINUX_MIB_TCPLOSSPROBES);
+	return;
 }
 
 /* Push out any pending frames which were held back due to
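
For reference, the probe timeout (PTO) selected by
tcp_schedule_loss_probe() above works out to max(2*SRTT,
1.5*SRTT + TCP_DELACK_MAX when exactly one packet is outstanding, 10 ms),
clamped so it never fires later than the pending RTO. A standalone sketch
of that arithmetic in milliseconds (the kernel computes in jiffies; the
200 ms delayed-ACK ceiling is an assumed constant here):

	#include <stdint.h>
	#include <stdio.h>

	/* Assumed constants, in milliseconds. */
	#define TCP_DELACK_MAX_MS	200
	#define TLP_MIN_TIMEOUT_MS	10

	/* Mirror of the PTO selection in tcp_schedule_loss_probe():
	 * 2*SRTT normally, 1.5*SRTT plus a delayed-ACK allowance when
	 * only one packet is outstanding, never below 10 ms, and never
	 * past the time remaining until the RTO would fire.
	 */
	static uint32_t tlp_timeout_ms(uint32_t srtt_ms, uint32_t packets_out,
				       uint32_t time_to_rto_ms)
	{
		uint32_t timeout = 2 * srtt_ms;

		if (packets_out == 1 &&
		    timeout < srtt_ms + srtt_ms / 2 + TCP_DELACK_MAX_MS)
			timeout = srtt_ms + srtt_ms / 2 + TCP_DELACK_MAX_MS;
		if (timeout < TLP_MIN_TIMEOUT_MS)
			timeout = TLP_MIN_TIMEOUT_MS;
		if (timeout > time_to_rto_ms)	/* RTO is sooner: use it */
			timeout = time_to_rto_ms;
		return timeout;
	}

	int main(void)
	{
		/* SRTT = 50 ms, several packets in flight, RTO 300 ms away */
		printf("PTO = %u ms\n", tlp_timeout_ms(50, 4, 300)); /* 100 */
		/* one outstanding packet: delayed-ACK allowance dominates */
		printf("PTO = %u ms\n", tlp_timeout_ms(50, 1, 300)); /* 275 */
		return 0;
	}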
@@ -2672,32 +2637,24 @@ int tcp_send_synack(struct sock *sk)
  * sk: listener socket
  * dst: dst entry attached to the SYNACK
  * req: request_sock pointer
- * rvp: request_values pointer
  *
  * Allocate one skb and build a SYNACK packet.
  * @dst is consumed : Caller should not use it again.
  */
 struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 				struct request_sock *req,
-				struct request_values *rvp,
 				struct tcp_fastopen_cookie *foc)
 {
 	struct tcp_out_options opts;
-	struct tcp_extend_values *xvp = tcp_xv(rvp);
 	struct inet_request_sock *ireq = inet_rsk(req);
 	struct tcp_sock *tp = tcp_sk(sk);
-	const struct tcp_cookie_values *cvp = tp->cookie_values;
 	struct tcphdr *th;
 	struct sk_buff *skb;
 	struct tcp_md5sig_key *md5;
 	int tcp_header_size;
 	int mss;
-	int s_data_desired = 0;
 
-	if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
-		s_data_desired = cvp->s_data_desired;
-	skb = alloc_skb(MAX_TCP_HEADER + 15 + s_data_desired,
-			sk_gfp_atomic(sk, GFP_ATOMIC));
+	skb = alloc_skb(MAX_TCP_HEADER + 15, sk_gfp_atomic(sk, GFP_ATOMIC));
 	if (unlikely(!skb)) {
 		dst_release(dst);
 		return NULL;
@@ -2739,9 +2696,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 	else
 #endif
 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
-	tcp_header_size = tcp_synack_options(sk, req, mss,
-					     skb, &opts, &md5, xvp, foc)
-			+ sizeof(*th);
+	tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, &md5,
+					     foc) + sizeof(*th);
 
 	skb_push(skb, tcp_header_size);
 	skb_reset_transport_header(skb);
@@ -2759,40 +2715,6 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 	tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
 			     TCPHDR_SYN | TCPHDR_ACK);
 
-	if (OPTION_COOKIE_EXTENSION & opts.options) {
-		if (s_data_desired) {
-			u8 *buf = skb_put(skb, s_data_desired);
-
-			/* copy data directly from the listening socket. */
-			memcpy(buf, cvp->s_data_payload, s_data_desired);
-			TCP_SKB_CB(skb)->end_seq += s_data_desired;
-		}
-
-		if (opts.hash_size > 0) {
-			__u32 workspace[SHA_WORKSPACE_WORDS];
-			u32 *mess = &xvp->cookie_bakery[COOKIE_DIGEST_WORDS];
-			u32 *tail = &mess[COOKIE_MESSAGE_WORDS-1];
-
-			/* Secret recipe depends on the Timestamp, (future)
-			 * Sequence and Acknowledgment Numbers, Initiator
-			 * Cookie, and others handled by IP variant caller.
-			 */
-			*tail-- ^= opts.tsval;
-			*tail-- ^= tcp_rsk(req)->rcv_isn + 1;
-			*tail-- ^= TCP_SKB_CB(skb)->seq + 1;
-
-			/* recommended */
-			*tail-- ^= (((__force u32)th->dest << 16) | (__force u32)th->source);
-			*tail-- ^= (u32)(unsigned long)cvp; /* per sockopt */
-
-			sha_transform((__u32 *)&xvp->cookie_bakery[0],
-				      (char *)mess,
-				      &workspace[0]);
-			opts.hash_location =
-				(__u8 *)&xvp->cookie_bakery[0];
-		}
-	}
-
 	th->seq = htonl(TCP_SKB_CB(skb)->seq);
 	/* XXX data is queued and acked as is. No buffer/window check */
 	th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);