Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r--	net/ipv4/tcp_output.c	41
1 file changed, 28 insertions, 13 deletions
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 20dfd892c86f..cfe6ffe1c177 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -702,7 +702,8 @@ static unsigned int tcp_synack_options(struct sock *sk,
 				   unsigned int mss, struct sk_buff *skb,
 				   struct tcp_out_options *opts,
 				   struct tcp_md5sig_key **md5,
-				   struct tcp_extend_values *xvp)
+				   struct tcp_extend_values *xvp,
+				   struct tcp_fastopen_cookie *foc)
 {
 	struct inet_request_sock *ireq = inet_rsk(req);
 	unsigned int remaining = MAX_TCP_OPTION_SPACE;
@@ -747,7 +748,15 @@ static unsigned int tcp_synack_options(struct sock *sk,
 		if (unlikely(!ireq->tstamp_ok))
 			remaining -= TCPOLEN_SACKPERM_ALIGNED;
 	}
-
+	if (foc != NULL) {
+		u32 need = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
+		need = (need + 3) & ~3U;  /* Align to 32 bits */
+		if (remaining >= need) {
+			opts->options |= OPTION_FAST_OPEN_COOKIE;
+			opts->fastopen_cookie = foc;
+			remaining -= need;
+		}
+	}
 	/* Similar rationale to tcp_syn_options() applies here, too.
 	 * If the <SYN> options fit, the same options should fit now!
 	 */
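The block added above only emits the experimental Fast Open cookie option when it still fits in the SYN-ACK's option space, rounding the option length up to a multiple of four bytes because TCP options are laid out in 32-bit words. Below is a minimal standalone sketch of that sizing check; the value used for TCPOLEN_EXP_FASTOPEN_BASE (kind, length and the 16-bit experiment identifier) is an assumption for illustration, not something defined in this diff.

/*
 * Sketch of the SYN-ACK option-space check added above.
 * Standalone userspace illustration; TCPOLEN_EXP_FASTOPEN_BASE is an
 * assumed value (kind + length + 2-byte experiment magic), not taken
 * from this diff.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_TCP_OPTION_SPACE		40	/* TCP header allows at most 40 option bytes */
#define TCPOLEN_EXP_FASTOPEN_BASE	4	/* assumed: kind + len + 2-byte magic */

/* Return the aligned size the cookie option would take, or 0 if it does
 * not fit in the remaining option space. */
static uint32_t fastopen_option_size(uint32_t remaining, uint8_t cookie_len)
{
	uint32_t need = TCPOLEN_EXP_FASTOPEN_BASE + cookie_len;

	need = (need + 3) & ~3U;		/* align to 32 bits */
	return remaining >= need ? need : 0;
}

int main(void)
{
	/* 8-byte cookie: 4 + 8 = 12, already aligned */
	printf("%u\n", fastopen_option_size(MAX_TCP_OPTION_SPACE - 20, 8));
	/* 7-byte cookie: 4 + 7 = 11, rounded up to 12 */
	printf("%u\n", fastopen_option_size(MAX_TCP_OPTION_SPACE - 20, 7));
	return 0;
}

Running it shows that both a 7-byte and an 8-byte cookie consume 12 option bytes after alignment.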
@@ -910,14 +919,18 @@ void tcp_release_cb(struct sock *sk)
 	if (flags & (1UL << TCP_TSQ_DEFERRED))
 		tcp_tsq_handler(sk);
 
-	if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED))
+	if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) {
 		tcp_write_timer_handler(sk);
-
-	if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED))
+		__sock_put(sk);
+	}
+	if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED)) {
 		tcp_delack_timer_handler(sk);
-
-	if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED))
+		__sock_put(sk);
+	}
+	if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) {
 		sk->sk_prot->mtu_reduced(sk);
+		__sock_put(sk);
+	}
 }
 EXPORT_SYMBOL(tcp_release_cb);
 
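Each deferred handler in tcp_release_cb() now ends with __sock_put(sk): the timer that deferred its work kept a reference on the socket, and the handler that eventually runs the work is the one that drops that reference, so the socket cannot be freed in between. The following is a simplified userspace sketch of that take-on-defer / drop-on-run pattern using a plain atomic counter; it illustrates the idea only and is not the kernel's sock refcounting code.

/*
 * Simplified sketch of the defer-with-reference pattern: whoever defers
 * work to the release callback holds a reference, and the handler that
 * finally runs drops it.  Plain C, not kernel code.
 */
#include <stdatomic.h>
#include <stdio.h>

enum { WRITE_TIMER_DEFERRED, DELACK_TIMER_DEFERRED };

struct fake_sock {
	atomic_int	refcnt;
	unsigned long	deferred;
};

static void hold(struct fake_sock *sk) { atomic_fetch_add(&sk->refcnt, 1); }
static void put(struct fake_sock *sk)  { atomic_fetch_sub(&sk->refcnt, 1); }

/* Timer context: the socket is busy, so record the work and keep a
 * reference so the socket stays alive until the work runs. */
static void timer_defers(struct fake_sock *sk, int which)
{
	hold(sk);			/* reference travels with the deferred work */
	sk->deferred |= 1UL << which;
}

/* Release path: run each deferred handler and drop its reference. */
static void release_cb(struct fake_sock *sk)
{
	unsigned long flags = sk->deferred;

	sk->deferred = 0;
	if (flags & (1UL << WRITE_TIMER_DEFERRED)) {
		/* ...write-timer handler would run here... */
		put(sk);
	}
	if (flags & (1UL << DELACK_TIMER_DEFERRED)) {
		/* ...delayed-ACK handler would run here... */
		put(sk);
	}
}

int main(void)
{
	struct fake_sock sk = { .refcnt = 1, .deferred = 0 };

	timer_defers(&sk, DELACK_TIMER_DEFERRED);
	release_cb(&sk);
	printf("refcnt %d\n", atomic_load(&sk.refcnt));	/* back to 1 */
	return 0;
}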
@@ -2024,10 +2037,10 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 		if (push_one)
 			break;
 	}
-	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Recovery)
-		tp->prr_out += sent_pkts;
 
 	if (likely(sent_pkts)) {
+		if (tcp_in_cwnd_reduction(sk))
+			tp->prr_out += sent_pkts;
 		tcp_cwnd_validate(sk);
 		return false;
 	}
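Both this hunk and the retransmission hunk below replace the explicit icsk_ca_state == TCP_CA_Recovery test with tcp_in_cwnd_reduction(sk), so PRR's prr_out counter is also updated while the congestion window is being reduced outside of loss recovery. The helper's body is not part of this diff; the sketch below assumes it is a simple congestion-state mask test, which is how such predicates are usually written.

/*
 * Sketch of a congestion-state predicate in the style of
 * tcp_in_cwnd_reduction().  The state names mirror the kernel's TCP_CA_*
 * states, but the helper body here is an assumption, not part of this diff.
 */
#include <stdbool.h>
#include <stdio.h>

enum ca_state { CA_OPEN, CA_DISORDER, CA_CWR, CA_RECOVERY, CA_LOSS };

static bool in_cwnd_reduction(enum ca_state state)
{
	/* true in both states where the congestion window is being reduced */
	return ((1 << CA_CWR) | (1 << CA_RECOVERY)) & (1 << state);
}

int main(void)
{
	printf("%d %d %d\n",
	       in_cwnd_reduction(CA_OPEN),	/* 0 */
	       in_cwnd_reduction(CA_CWR),	/* 1 */
	       in_cwnd_reduction(CA_RECOVERY));	/* 1 */
	return 0;
}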
@@ -2529,7 +2542,7 @@ begin_fwd:
 		}
 		NET_INC_STATS_BH(sock_net(sk), mib_idx);
 
-		if (inet_csk(sk)->icsk_ca_state == TCP_CA_Recovery)
+		if (tcp_in_cwnd_reduction(sk))
 			tp->prr_out += tcp_skb_pcount(skb);
 
 		if (skb == tcp_write_queue_head(sk))
@@ -2654,7 +2667,8 @@ int tcp_send_synack(struct sock *sk)
  */
 struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 				struct request_sock *req,
-				struct request_values *rvp)
+				struct request_values *rvp,
+				struct tcp_fastopen_cookie *foc)
 {
 	struct tcp_out_options opts;
 	struct tcp_extend_values *xvp = tcp_xv(rvp);
@@ -2714,7 +2728,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 #endif
 	TCP_SKB_CB(skb)->when = tcp_time_stamp;
 	tcp_header_size = tcp_synack_options(sk, req, mss,
-					     skb, &opts, &md5, xvp)
+					     skb, &opts, &md5, xvp, foc)
 					+ sizeof(*th);
 
 	skb_push(skb, tcp_header_size);
@@ -2768,7 +2782,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 	}
 
 	th->seq = htonl(TCP_SKB_CB(skb)->seq);
-	th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1);
+	/* XXX data is queued and acked as is. No buffer/window check */
+	th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
 
 	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
 	th->window = htons(min(req->rcv_wnd, 65535U));
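Acknowledging tcp_rsk(req)->rcv_nxt instead of the fixed rcv_isn + 1 matters once a SYN can carry a payload (Fast Open): the SYN-ACK then acknowledges the SYN plus whatever data was already queued from it, which is also what the new XXX comment warns about (that data is accepted without a buffer/window check). A small arithmetic sketch of the difference, using placeholder variables rather than the kernel's request-sock fields:

/*
 * Arithmetic sketch of the ack_seq change above.  Placeholder variables,
 * not the kernel's request-sock fields.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t rcv_isn = 1000;	/* peer's initial sequence number */
	uint32_t syn_data = 100;	/* payload carried on the SYN (Fast Open) */

	uint32_t ack_old = rcv_isn + 1;			/* acks only the SYN */
	uint32_t rcv_nxt = rcv_isn + 1 + syn_data;	/* next byte expected */
	uint32_t ack_new = rcv_nxt;			/* acks SYN plus queued data */

	printf("old ack_seq %u, new ack_seq %u\n", ack_old, ack_new);
	return 0;
}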