author	Eric Dumazet <edumazet@google.com>	2017-04-18 12:45:52 -0400
committer	David S. Miller <davem@davemloft.net>	2017-04-20 15:42:11 -0400
commit	0f9fa831aecfc297b7b45d4f046759bcefcf87f0 (patch)
tree	24e08556cbd89b183cfb02743e268f61346e9997 /net/ipv4/tcp_input.c
parent	3d4762639dd36a5f0f433f0c9d82e9743dc21a33 (diff)
tcp: remove poll() flakes with FastOpen
When using TCP FastOpen for an active session, we send one wakeup event from tcp_finish_connect(), right before the data eventually contained in the received SYNACK is queued to sk->sk_receive_queue.

This means that, depending on machine load or luck, poll() users might receive POLLOUT events instead of POLLIN|POLLOUT.

To fix this, we need to move the call to sk->sk_state_change() after the (optional) call to tcp_rcv_fastopen_synack().

Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
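As context (not part of the commit), here is a minimal userspace sketch of the poll() pattern this patch fixes. The server address and port are hypothetical, error handling is omitted, and it assumes client-side Fast Open is enabled (net.ipv4.tcp_fastopen) and that the server replies to the request carried in the SYN: when the SYNACK carries data, the first poll() wakeup should report POLLIN|POLLOUT, whereas before this patch it could report only POLLOUT.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

#ifndef MSG_FASTOPEN
#define MSG_FASTOPEN 0x20000000	/* send data in the SYN */
#endif

int main(void)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port   = htons(8080),	/* hypothetical TFO-enabled server */
	};
	struct pollfd pfd;
	char req[] = "ping";

	inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr);

	pfd.fd = socket(AF_INET, SOCK_STREAM, 0);
	pfd.events = POLLIN | POLLOUT;

	/* MSG_FASTOPEN performs the implicit connect() and carries req
	 * in the SYN (data actually rides on the SYN only once a TFO
	 * cookie has been obtained from a prior connection). */
	sendto(pfd.fd, req, strlen(req), MSG_FASTOPEN,
	       (struct sockaddr *)&addr, sizeof(addr));

	/* Before this patch, the wakeup could fire from
	 * tcp_finish_connect() before the SYNACK payload was queued to
	 * sk->sk_receive_queue, so this poll() might see only POLLOUT. */
	poll(&pfd, 1, 5000);
	printf("revents:%s%s\n",
	       pfd.revents & POLLIN ? " POLLIN" : "",
	       pfd.revents & POLLOUT ? " POLLOUT" : "");
	return 0;
}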
Diffstat (limited to 'net/ipv4/tcp_input.c')
 net/ipv4/tcp_input.c | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 37e2aa925f62..341f021f02a2 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5580,10 +5580,6 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
 	else
 		tp->pred_flags = 0;
 
-	if (!sock_flag(sk, SOCK_DEAD)) {
-		sk->sk_state_change(sk);
-		sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
-	}
 }
 
 static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
@@ -5652,6 +5648,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct tcp_fastopen_cookie foc = { .len = -1 };
 	int saved_clamp = tp->rx_opt.mss_clamp;
+	bool fastopen_fail;
 
 	tcp_parse_options(skb, &tp->rx_opt, 0, &foc);
 	if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
@@ -5755,10 +5752,15 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 
 		tcp_finish_connect(sk, skb);
 
-		if ((tp->syn_fastopen || tp->syn_data) &&
-		    tcp_rcv_fastopen_synack(sk, skb, &foc))
-			return -1;
+		fastopen_fail = (tp->syn_fastopen || tp->syn_data) &&
+				tcp_rcv_fastopen_synack(sk, skb, &foc);
 
+		if (!sock_flag(sk, SOCK_DEAD)) {
+			sk->sk_state_change(sk);
+			sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
+		}
+		if (fastopen_fail)
+			return -1;
 		if (sk->sk_write_pending ||
 		    icsk->icsk_accept_queue.rskq_defer_accept ||
 		    icsk->icsk_ack.pingpong) {