Diffstat (limited to 'net/ipv4/tcp_input.c')
 -rw-r--r--  net/ipv4/tcp_input.c | 217
 1 file changed, 176 insertions(+), 41 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 8fdd27b17306..3a4d9b34bed4 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -866,7 +866,7 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
 /* This must be called before lost_out is incremented */
 static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
 {
-	if ((tp->retransmit_skb_hint == NULL) ||
+	if (!tp->retransmit_skb_hint ||
 	    before(TCP_SKB_CB(skb)->seq,
 		   TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
 		tp->retransmit_skb_hint = skb;
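
This hunk and several below are pure style conversions: checkpatch.pl prefers !ptr over ptr == NULL (and ptr over ptr != NULL); the predicate is identical. A trivial standalone check (hypothetical helper, not from the patch):

#include <stdio.h>

/* Both tests compare the pointer against zero; only the style differs. */
static const char *describe(const void *ptr)
{
	if (ptr == NULL)	/* old style */
		return "null (old style)";
	if (!ptr)		/* new style; never reached: same predicate */
		return "null (new style)";
	return "non-null";
}

int main(void)
{
	int x = 0;

	printf("%s\n", describe(NULL));	/* null (old style) */
	printf("%s\n", describe(&x));	/* non-null */
	return 0;
}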
@@ -1256,7 +1256,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
 	fack_count += pcount;
 
 	/* Lost marker hint past SACKed? Tweak RFC3517 cnt */
-	if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) &&
+	if (!tcp_is_fack(tp) && tp->lost_skb_hint &&
 	    before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq))
 		tp->lost_cnt_hint += pcount;
 
@@ -1535,7 +1535,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
 		if (!before(TCP_SKB_CB(skb)->seq, end_seq))
 			break;
 
-		if ((next_dup != NULL) &&
+		if (next_dup &&
 		    before(TCP_SKB_CB(skb)->seq, next_dup->end_seq)) {
 			in_sack = tcp_match_skb_to_sack(sk, skb,
 							next_dup->start_seq,
@@ -1551,7 +1551,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
 		if (in_sack <= 0) {
 			tmp = tcp_shift_skb_data(sk, skb, state,
 						 start_seq, end_seq, dup_sack);
-			if (tmp != NULL) {
+			if (tmp) {
 				if (tmp != skb) {
 					skb = tmp;
 					continue;
@@ -1614,7 +1614,7 @@ static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb,
 					   struct tcp_sacktag_state *state,
 					   u32 skip_to_seq)
 {
-	if (next_dup == NULL)
+	if (!next_dup)
 		return skb;
 
 	if (before(next_dup->start_seq, skip_to_seq)) {
@@ -1783,7 +1783,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
 			if (tcp_highest_sack_seq(tp) == cache->end_seq) {
 				/* ...but better entrypoint exists! */
 				skb = tcp_highest_sack(sk);
-				if (skb == NULL)
+				if (!skb)
 					break;
 				state.fack_count = tp->fackets_out;
 				cache++;
@@ -1798,7 +1798,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
 
 		if (!before(start_seq, tcp_highest_sack_seq(tp))) {
 			skb = tcp_highest_sack(sk);
-			if (skb == NULL)
+			if (!skb)
 				break;
 			state.fack_count = tp->fackets_out;
 		}
@@ -3099,14 +3099,13 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 			if (sacked & TCPCB_SACKED_RETRANS)
 				tp->retrans_out -= acked_pcount;
 			flag |= FLAG_RETRANS_DATA_ACKED;
-		} else {
+		} else if (!(sacked & TCPCB_SACKED_ACKED)) {
 			last_ackt = skb->skb_mstamp;
 			WARN_ON_ONCE(last_ackt.v64 == 0);
 			if (!first_ackt.v64)
 				first_ackt = last_ackt;
 
-			if (!(sacked & TCPCB_SACKED_ACKED))
-				reord = min(pkts_acked, reord);
+			reord = min(pkts_acked, reord);
 			if (!after(scb->end_seq, tp->high_seq))
 				flag |= FLAG_ORIG_SACK_ACKED;
 		}
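
The restructured branch narrows which segments feed the ACK-based RTT sample: a segment that was already SACKed no longer updates first_ackt/last_ackt or the reordering estimate when the cumulative ACK finally covers it, presumably because its delivery was effectively timed by the earlier SACK. A userspace model of the new selection (flag values are local stand-ins for the kernel's TCPCB_* bits):

#include <stdbool.h>
#include <stdio.h>

/* Local stand-ins for the kernel's TCPCB_* sacked bits. */
#define TCPCB_SACKED_ACKED	0x01	/* SKB ACKed by a SACK block */
#define TCPCB_RETRANS		0x02	/* SKB was retransmitted */

/* Mirrors the new branch: retransmitted segments give ambiguous RTTs,
 * and segments SACKed earlier were already timed by the SACK arrival.
 */
static bool feeds_ack_rtt_sample(unsigned int sacked)
{
	if (sacked & TCPCB_RETRANS)
		return false;
	return !(sacked & TCPCB_SACKED_ACKED);
}

int main(void)
{
	printf("plain segment:  %d\n", feeds_ack_rtt_sample(0));		   /* 1 */
	printf("SACKed earlier: %d\n", feeds_ack_rtt_sample(TCPCB_SACKED_ACKED)); /* 0 */
	printf("retransmitted:  %d\n", feeds_ack_rtt_sample(TCPCB_RETRANS));	   /* 0 */
	return 0;
}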
@@ -3321,6 +3320,36 @@ static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32
 	return flag;
 }
 
+/* Return true if we're currently rate-limiting out-of-window ACKs and
+ * thus shouldn't send a dupack right now. We rate-limit dupacks in
+ * response to out-of-window SYNs or ACKs to mitigate ACK loops or DoS
+ * attacks that send repeated SYNs or ACKs for the same connection. To
+ * do this, we do not send a duplicate SYNACK or ACK if the remote
+ * endpoint is sending out-of-window SYNs or pure ACKs at a high rate.
+ */
+bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
+			  int mib_idx, u32 *last_oow_ack_time)
+{
+	/* Data packets without SYNs are not likely part of an ACK loop. */
+	if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) &&
+	    !tcp_hdr(skb)->syn)
+		goto not_rate_limited;
+
+	if (*last_oow_ack_time) {
+		s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
+
+		if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
+			NET_INC_STATS_BH(net, mib_idx);
+			return true;	/* rate-limited: don't send yet! */
+		}
+	}
+
+	*last_oow_ack_time = tcp_time_stamp;
+
+not_rate_limited:
+	return false;	/* not rate-limited: go ahead, send dupack now! */
+}
+
 /* RFC 5961 7 [ACK Throttling] */
 static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
 {
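
Callers elsewhere in this series (e.g. the dupack and challenge-ACK paths) pass their own MIB counter and a per-connection timestamp slot. A standalone model of the limiter; the 500-tick window is an assumed stand-in for sysctl_tcp_invalid_ratelimit, which this function reads but which is defined elsewhere in the series:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static const uint32_t ratelimit = 500;	/* assumed default, in ticks */

/* Allow at most one out-of-window reply per rate-limit window; `now`
 * stands in for tcp_time_stamp (jiffies), and 0 means "never replied".
 */
static bool oow_rate_limited(uint32_t now, uint32_t *last_oow_ack_time)
{
	if (*last_oow_ack_time) {
		int32_t elapsed = (int32_t)(now - *last_oow_ack_time);

		if (elapsed >= 0 && (uint32_t)elapsed < ratelimit)
			return true;	/* suppress: inside the window */
	}
	*last_oow_ack_time = now;
	return false;			/* OK to send the dupack */
}

int main(void)
{
	uint32_t last = 0;

	printf("t=1000: %d\n", oow_rate_limited(1000, &last));	/* 0: sent */
	printf("t=1200: %d\n", oow_rate_limited(1200, &last));	/* 1: limited */
	printf("t=1600: %d\n", oow_rate_limited(1600, &last));	/* 0: sent */
	return 0;
}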
@@ -3572,6 +3601,23 @@ old_ack:
 	return 0;
 }
 
+static void tcp_parse_fastopen_option(int len, const unsigned char *cookie,
+				      bool syn, struct tcp_fastopen_cookie *foc,
+				      bool exp_opt)
+{
+	/* Valid only in SYN or SYN-ACK with an even length. */
+	if (!foc || !syn || len < 0 || (len & 1))
+		return;
+
+	if (len >= TCP_FASTOPEN_COOKIE_MIN &&
+	    len <= TCP_FASTOPEN_COOKIE_MAX)
+		memcpy(foc->val, cookie, len);
+	else if (len != 0)
+		len = -1;
+	foc->len = len;
+	foc->exp = exp_opt;
+}
+
 /* Look for tcp options. Normally only called on SYN and SYNACK packets.
  * But, this can also be called on packets in the established flow when
  * the fast version below fails.
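
The helper leaves foc->len in one of three states: a valid cookie length (an even 4 to 16 bytes), 0 for a bare option that merely requests a cookie, or -1 for a malformed length. A userspace harness of the same logic (COOKIE_MIN/MAX are local stand-ins for TCP_FASTOPEN_COOKIE_MIN/MAX, 4 and 16 in the kernel headers):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define COOKIE_MIN 4	/* stand-in for TCP_FASTOPEN_COOKIE_MIN */
#define COOKIE_MAX 16	/* stand-in for TCP_FASTOPEN_COOKIE_MAX */

struct fastopen_cookie {
	signed char len;	/* -1: malformed, 0: cookie requested, >0: cookie */
	unsigned char val[COOKIE_MAX];
	bool exp;		/* carried in the experimental option? */
};

/* Same logic as the new helper, outside the kernel. */
static void parse_fastopen_option(int len, const unsigned char *cookie,
				  bool syn, struct fastopen_cookie *foc,
				  bool exp_opt)
{
	if (!foc || !syn || len < 0 || (len & 1))
		return;

	if (len >= COOKIE_MIN && len <= COOKIE_MAX)
		memcpy(foc->val, cookie, len);
	else if (len != 0)
		len = -1;
	foc->len = len;
	foc->exp = exp_opt;
}

int main(void)
{
	static const unsigned char c[8] = "abcdefgh";
	struct fastopen_cookie foc = { .len = -1 };

	parse_fastopen_option(8, c, true, &foc, false);
	printf("8-byte cookie:  len=%d\n", foc.len);	/* 8 */
	parse_fastopen_option(0, c, true, &foc, false);
	printf("cookie request: len=%d\n", foc.len);	/* 0 */
	parse_fastopen_option(2, c, true, &foc, false);
	printf("too short:      len=%d\n", foc.len);	/* -1 */
	parse_fastopen_option(7, c, true, &foc, false);
	printf("odd, ignored:   len=%d\n", foc.len);	/* still -1 */
	return 0;
}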
@@ -3661,21 +3707,22 @@ void tcp_parse_options(const struct sk_buff *skb,
 			 */
 			break;
 #endif
+		case TCPOPT_FASTOPEN:
+			tcp_parse_fastopen_option(
+				opsize - TCPOLEN_FASTOPEN_BASE,
+				ptr, th->syn, foc, false);
+			break;
+
 		case TCPOPT_EXP:
 			/* Fast Open option shares code 254 using a
-			 * 16 bits magic number. It's valid only in
-			 * SYN or SYN-ACK with an even size.
+			 * 16 bits magic number.
 			 */
-			if (opsize < TCPOLEN_EXP_FASTOPEN_BASE ||
-			    get_unaligned_be16(ptr) != TCPOPT_FASTOPEN_MAGIC ||
-			    foc == NULL || !th->syn || (opsize & 1))
-				break;
-			foc->len = opsize - TCPOLEN_EXP_FASTOPEN_BASE;
-			if (foc->len >= TCP_FASTOPEN_COOKIE_MIN &&
-			    foc->len <= TCP_FASTOPEN_COOKIE_MAX)
-				memcpy(foc->val, ptr + 2, foc->len);
-			else if (foc->len != 0)
-				foc->len = -1;
+			if (opsize >= TCPOLEN_EXP_FASTOPEN_BASE &&
+			    get_unaligned_be16(ptr) ==
+			    TCPOPT_FASTOPEN_MAGIC)
+				tcp_parse_fastopen_option(opsize -
+					TCPOLEN_EXP_FASTOPEN_BASE,
+					ptr + 2, th->syn, foc, true);
 			break;
 
 		}
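
Both cases now funnel into tcp_parse_fastopen_option() and differ only in framing: RFC 7413 gives Fast Open its own option kind (TCPOPT_FASTOPEN, 34) with the cookie directly after the kind/length bytes, while the experimental form reuses kind 254 (TCPOPT_EXP) plus the 16-bit magic 0xF989, hence the different base lengths (2 vs. 4) and the ptr + 2 offset. A sketch of the two encodings:

#include <stdio.h>
#include <string.h>

/* Build a TFO option in either framing (kinds and magic per RFC 7413). */
static size_t build_tfo_opt(unsigned char *p, const unsigned char *cookie,
			    size_t clen, int experimental)
{
	if (experimental) {
		p[0] = 254;			/* TCPOPT_EXP */
		p[1] = (unsigned char)(4 + clen); /* TCPOLEN_EXP_FASTOPEN_BASE + cookie */
		p[2] = 0xF9;			/* TCPOPT_FASTOPEN_MAGIC, */
		p[3] = 0x89;			/* big-endian 0xF989 */
		memcpy(p + 4, cookie, clen);	/* cookie follows the magic */
		return 4 + clen;
	}
	p[0] = 34;				/* TCPOPT_FASTOPEN */
	p[1] = (unsigned char)(2 + clen);	/* TCPOLEN_FASTOPEN_BASE + cookie */
	memcpy(p + 2, cookie, clen);
	return 2 + clen;
}

int main(void)
{
	static const unsigned char cookie[8] = "abcdefgh";
	unsigned char buf[24];
	size_t n, i;

	n = build_tfo_opt(buf, cookie, sizeof(cookie), 1);
	for (i = 0; i < n; i++)
		printf("%02x ", buf[i]);	/* fe 0c f9 89 61 62 ... */
	putchar('\n');
	return 0;
}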
@@ -4639,7 +4686,7 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
 	struct sk_buff *head;
 	u32 start, end;
 
-	if (skb == NULL)
+	if (!skb)
 		return;
 
 	start = TCP_SKB_CB(skb)->seq;
@@ -4770,7 +4817,7 @@ static bool tcp_should_expand_sndbuf(const struct sock *sk)
 		return false;
 
 	/* If we filled the congestion window, do not expand. */
-	if (tp->packets_out >= tp->snd_cwnd)
+	if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
 		return false;
 
 	return true;
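
tp->packets_out counts every unacked segment, including ones already SACKed or marked lost, so the old test could refuse to grow the send buffer even when the network held little data. tcp_packets_in_flight() (from include/net/tcp.h) subtracts those out. A standalone rendering of that estimator:

#include <stdio.h>

struct flight {
	unsigned int packets_out;	/* segments sent and unacked */
	unsigned int sacked_out;	/* segments SACKed by the receiver */
	unsigned int lost_out;		/* segments presumed lost */
	unsigned int retrans_out;	/* retransmitted, still unacked */
};

/* Mirrors tcp_packets_in_flight(): sent, minus SACKed or lost, plus
 * retransmissions currently in the network.
 */
static unsigned int packets_in_flight(const struct flight *tp)
{
	return tp->packets_out -
	       (tp->sacked_out + tp->lost_out) +
	       tp->retrans_out;
}

int main(void)
{
	/* 10 sent, 3 SACKed, 2 lost, 1 retransmitted: 6 in flight, even
	 * though packets_out still reads 10.
	 */
	struct flight tp = { 10, 3, 2, 1 };

	printf("in flight: %u\n", packets_in_flight(&tp));
	return 0;
}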
@@ -4798,6 +4845,8 @@ static void tcp_check_space(struct sock *sk)
 {
 	if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) {
 		sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
+		/* pairs with tcp_poll() */
+		smp_mb__after_atomic();
 		if (sk->sk_socket &&
 		    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
 			tcp_new_space(sk);
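
The new barrier pairs with one added to tcp_poll() in the same series: poll sets SOCK_NOSPACE, issues a full barrier, then re-checks for space, while this path publishes the freed space, issues a full barrier, then reads SOCK_NOSPACE. This is the classic store-buffering pattern, guaranteeing at least one side sees the other's store so a blocked writer cannot miss its wakeup. A C11-atomics model (not kernel code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool nospace;	/* models SOCK_NOSPACE */
static atomic_bool space_freed;	/* models the freed write-queue space */

/* Models tcp_poll(): publish intent to sleep, then re-check space. */
static bool poll_side(void)
{
	atomic_store(&nospace, true);
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */
	return atomic_load(&space_freed);		/* writable after all? */
}

/* Models tcp_check_space(): publish freed space, then look for waiters. */
static bool ack_side(void)
{
	atomic_store(&space_freed, true);
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb__after_atomic() */
	return atomic_load(&nospace);			/* anyone to wake up? */
}

int main(void)
{
	/* Run concurrently, the two fences guarantee that at least one of
	 * these returns true: either poll sees the freed space or the ACK
	 * path sees the waiter, so the wakeup is never lost.
	 */
	printf("ack saw waiter: %d\n", ack_side());
	printf("poll saw space: %d\n", poll_side());
	return 0;
}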
@@ -5094,7 +5143,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	if (unlikely(sk->sk_rx_dst == NULL))
+	if (unlikely(!sk->sk_rx_dst))
 		inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb);
 	/*
 	 *	Header prediction.
@@ -5291,7 +5340,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
 
 	tcp_set_state(sk, TCP_ESTABLISHED);
 
-	if (skb != NULL) {
+	if (skb) {
 		icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
 		security_inet_conn_established(sk, skb);
 	}
@@ -5329,8 +5378,8 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *data = tp->syn_data ? tcp_write_queue_head(sk) : NULL;
-	u16 mss = tp->rx_opt.mss_clamp;
-	bool syn_drop;
+	u16 mss = tp->rx_opt.mss_clamp, try_exp = 0;
+	bool syn_drop = false;
 
 	if (mss == tp->rx_opt.user_mss) {
 		struct tcp_options_received opt;
@@ -5342,16 +5391,25 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
 		mss = opt.mss_clamp;
 	}
 
-	if (!tp->syn_fastopen)  /* Ignore an unsolicited cookie */
+	if (!tp->syn_fastopen) {
+		/* Ignore an unsolicited cookie */
 		cookie->len = -1;
+	} else if (tp->total_retrans) {
+		/* SYN timed out and the SYN-ACK neither has a cookie nor
+		 * acknowledges data. Presumably the remote received only
+		 * the retransmitted (regular) SYNs: either the original
+		 * SYN-data or the corresponding SYN-ACK was dropped.
+		 */
+		syn_drop = (cookie->len < 0 && data);
+	} else if (cookie->len < 0 && !tp->syn_data) {
+		/* We requested a cookie but didn't get it. If we did not use
+		 * the (old) exp opt format then try so next time (try_exp=1).
+		 * Otherwise we go back to use the RFC7413 opt (try_exp=2).
+		 */
+		try_exp = tp->syn_fastopen_exp ? 2 : 1;
+	}
 
-	/* The SYN-ACK neither has cookie nor acknowledges the data. Presumably
-	 * the remote receives only the retransmitted (regular) SYNs: either
-	 * the original SYN-data or the corresponding SYN-ACK is lost.
-	 */
-	syn_drop = (cookie->len <= 0 && data && tp->total_retrans);
-
-	tcp_fastopen_cache_set(sk, mss, cookie, syn_drop);
+	tcp_fastopen_cache_set(sk, mss, cookie, syn_drop, try_exp);
 
 	if (data) { /* Retransmit unacked data in SYN */
 		tcp_for_write_queue_from(data, sk) {
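
The rewrite splits three outcomes that the old code folded into one expression: an unsolicited cookie is dropped, retransmitted SYNs may indicate the server never saw the SYN-data (syn_drop), and a missing cookie on a clean exchange flips which option format to try next (try_exp). A pure-function model of the decision, with booleans standing in for the tp-> fields:

#include <stdbool.h>
#include <stdio.h>

struct tfo_verdict {
	bool syn_drop;		/* remote likely dropped our SYN-data */
	unsigned char try_exp;	/* 0: keep format, 1: try exp opt, 2: back to RFC7413 */
};

static struct tfo_verdict tfo_synack_verdict(bool syn_fastopen,
					     bool syn_fastopen_exp,
					     bool total_retrans,
					     bool syn_data,
					     int cookie_len,
					     bool data_outstanding)
{
	struct tfo_verdict v = { false, 0 };

	if (!syn_fastopen)
		return v;		/* unsolicited cookie: ignore */
	if (total_retrans)
		v.syn_drop = (cookie_len < 0 && data_outstanding);
	else if (cookie_len < 0 && !syn_data)
		v.try_exp = syn_fastopen_exp ? 2 : 1;
	return v;
}

int main(void)
{
	/* Requested a cookie with the RFC7413 option, got none, and no SYN
	 * retransmits: retry with the experimental option next time.
	 */
	struct tfo_verdict v = tfo_synack_verdict(true, false, false,
						  false, -1, false);

	printf("syn_drop=%d try_exp=%d\n", v.syn_drop, v.try_exp);
	return 0;
}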
@@ -5660,11 +5718,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 	}
 
 	req = tp->fastopen_rsk;
-	if (req != NULL) {
+	if (req) {
 		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
 		    sk->sk_state != TCP_FIN_WAIT1);
 
-		if (tcp_check_req(sk, skb, req, NULL, true) == NULL)
+		if (!tcp_check_req(sk, skb, req, true))
 			goto discard;
 	}
 
@@ -5750,7 +5808,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 	 * ACK we have received, this would have acknowledged
 	 * our SYNACK so stop the SYNACK timer.
 	 */
-	if (req != NULL) {
+	if (req) {
 		/* Return RST if ack_seq is invalid.
 		 * Note that RFC793 only says to generate a
 		 * DUPACK for it but for TCP Fast Open it seems
@@ -5912,6 +5970,80 @@ static void tcp_ecn_create_request(struct request_sock *req,
 		inet_rsk(req)->ecn_ok = 1;
 }
 
+static void tcp_openreq_init(struct request_sock *req,
+			     const struct tcp_options_received *rx_opt,
+			     struct sk_buff *skb, const struct sock *sk)
+{
+	struct inet_request_sock *ireq = inet_rsk(req);
+
+	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
+	req->cookie_ts = 0;
+	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
+	tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
+	tcp_rsk(req)->snt_synack = tcp_time_stamp;
+	tcp_rsk(req)->last_oow_ack_time = 0;
+	req->mss = rx_opt->mss_clamp;
+	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
+	ireq->tstamp_ok = rx_opt->tstamp_ok;
+	ireq->sack_ok = rx_opt->sack_ok;
+	ireq->snd_wscale = rx_opt->snd_wscale;
+	ireq->wscale_ok = rx_opt->wscale_ok;
+	ireq->acked = 0;
+	ireq->ecn_ok = 0;
+	ireq->ir_rmt_port = tcp_hdr(skb)->source;
+	ireq->ir_num = ntohs(tcp_hdr(skb)->dest);
+	ireq->ir_mark = inet_request_mark(sk, skb);
+}
+
+struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
+				      struct sock *sk_listener)
+{
+	struct request_sock *req = reqsk_alloc(ops, sk_listener);
+
+	if (req) {
+		struct inet_request_sock *ireq = inet_rsk(req);
+
+		kmemcheck_annotate_bitfield(ireq, flags);
+		ireq->opt = NULL;
+		atomic64_set(&ireq->ir_cookie, 0);
+		ireq->ireq_state = TCP_NEW_SYN_RECV;
+		write_pnet(&ireq->ireq_net, sock_net(sk_listener));
+		ireq->ireq_family = sk_listener->sk_family;
+	}
+
+	return req;
+}
+EXPORT_SYMBOL(inet_reqsk_alloc);
+
+/*
+ * Return true if a syncookie should be sent
+ */
+static bool tcp_syn_flood_action(struct sock *sk,
+				 const struct sk_buff *skb,
+				 const char *proto)
+{
+	const char *msg = "Dropping request";
+	bool want_cookie = false;
+	struct listen_sock *lopt;
+
+#ifdef CONFIG_SYN_COOKIES
+	if (sysctl_tcp_syncookies) {
+		msg = "Sending cookies";
+		want_cookie = true;
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
+	} else
+#endif
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
+
+	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
+	if (!lopt->synflood_warned && sysctl_tcp_syncookies != 2) {
+		lopt->synflood_warned = 1;
+		pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
+			proto, ntohs(tcp_hdr(skb)->dest), msg);
+	}
+	return want_cookie;
+}
+
 int tcp_conn_request(struct request_sock_ops *rsk_ops,
 		     const struct tcp_request_sock_ops *af_ops,
 		     struct sock *sk, struct sk_buff *skb)
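
tcp_syn_flood_action() centralizes the decision for a listen queue under SYN pressure: answer with a syncookie when sysctl_tcp_syncookies is enabled, and warn once per listener unless cookies are unconditional (mode 2, where flooding-like traffic is expected). A compact model of that decision:

#include <stdbool.h>
#include <stdio.h>

static int sysctl_tcp_syncookies = 1;	/* 0: off, 1: under pressure, 2: always */
static bool synflood_warned;		/* once per listener in the kernel */

static bool syn_flood_action(unsigned short port)
{
	bool want_cookie = sysctl_tcp_syncookies != 0;

	if (!synflood_warned && sysctl_tcp_syncookies != 2) {
		synflood_warned = true;
		printf("Possible SYN flooding on port %u. %s.\n", port,
		       want_cookie ? "Sending cookies" : "Dropping request");
	}
	return want_cookie;
}

int main(void)
{
	printf("cookie? %d\n", syn_flood_action(80));	/* warns once */
	printf("cookie? %d\n", syn_flood_action(80));	/* silent now */
	return 0;
}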
@@ -5949,7 +6081,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 		goto drop;
 	}
 
-	req = inet_reqsk_alloc(rsk_ops);
+	req = inet_reqsk_alloc(rsk_ops, sk);
 	if (!req)
 		goto drop;
 
@@ -5966,6 +6098,9 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
 	tcp_openreq_init(req, &tmp_opt, skb, sk);
 
+	/* Note: tcp_v6_init_req() might override ir_iif for link locals */
+	inet_rsk(req)->ir_iif = sk->sk_bound_dev_if;
+
 	af_ops->init_req(req, sk, skb);
 
 	if (security_inet_conn_request(sk, skb, req))
@@ -6038,7 +6173,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 		if (err || want_cookie)
 			goto drop_and_free;
 
-		tcp_rsk(req)->listener = NULL;
+		tcp_rsk(req)->tfo_listener = false;
 		af_ops->queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
 	}
 