author		Eric Dumazet <edumazet@google.com>	2013-05-24 11:03:54 -0400
committer	David S. Miller <davem@davemloft.net>	2013-05-26 02:22:18 -0400
commit		1f6afc81088a1f5a472b272408730c73b72c68aa (patch)
tree		cf38e6ca2b60e5915b8fb2ec44e478c6f3c4f350 /net/ipv4/tcp_input.c
parent		42e52bf9e3ae80fd44b21ddfcd64c54e6db2ff76 (diff)
tcp: remove one indentation level in tcp_rcv_state_process()
Remove one level of indentation 'introduced' in commit
c3ae62af8e75 ("tcp: should drop incoming frames without ACK flag set"):
if (true) {
        ...
}
The @acceptable variable is a boolean.
This patch is a pure cleanup.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Yuchung Cheng <ycheng@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
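
In outline, the cleanup hoists the ACK-acceptability flag into a function-scope bool and deletes the always-true wrapper, de-indenting everything inside it. A minimal compilable sketch of the before/after shape (with a hypothetical frame_is_acceptable() helper standing in for tcp_ack(); not the kernel code itself):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for tcp_ack(); only the control-flow shape matters. */
static bool frame_is_acceptable(int flags)
{
	return flags > 0;
}

/* Before: a leftover always-true branch forces one extra indentation
 * level, and the flag is a block-local int.
 */
static int process_before(int flags)
{
	if (true) {
		int acceptable = frame_is_acceptable(flags);

		if (!acceptable)
			return 1;
		/* ... rest of the state machine, one tab deeper ... */
	}
	return 0;
}

/* After: the flag becomes a function-scope bool and the wrapper is
 * dropped, removing one indentation level without changing behavior.
 */
static int process_after(int flags)
{
	bool acceptable;

	acceptable = frame_is_acceptable(flags);
	if (!acceptable)
		return 1;
	/* ... rest of the state machine, one tab shallower ... */
	return 0;
}

int main(void)
{
	/* Both variants behave identically. */
	printf("%d %d\n", process_before(0), process_after(0));
	return 0;
}

Both variants are behaviorally identical; the patch only moves the declaration and removes the dead branch, which is why the diffstat below is almost perfectly balanced.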
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--	net/ipv4/tcp_input.c	269
1 file changed, 133 insertions(+), 136 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 8230cd6243aa..40614257d2c5 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5536,6 +5536,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct request_sock *req;
 	int queued = 0;
+	bool acceptable;
 
 	tp->rx_opt.saw_tstamp = 0;
 
@@ -5606,157 +5607,153 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		return 0;
 
 	/* step 5: check the ACK field */
-	if (true) {
-		int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH |
-						  FLAG_UPDATE_TS_RECENT) > 0;
-
-		switch (sk->sk_state) {
-		case TCP_SYN_RECV:
-			if (acceptable) {
-				/* Once we leave TCP_SYN_RECV, we no longer
-				 * need req so release it.
-				 */
-				if (req) {
-					tcp_synack_rtt_meas(sk, req);
-					tp->total_retrans = req->num_retrans;
-
-					reqsk_fastopen_remove(sk, req, false);
-				} else {
-					/* Make sure socket is routed, for
-					 * correct metrics.
-					 */
-					icsk->icsk_af_ops->rebuild_header(sk);
-					tcp_init_congestion_control(sk);
+	acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH |
+				      FLAG_UPDATE_TS_RECENT) > 0;
 
-					tcp_mtup_init(sk);
-					tcp_init_buffer_space(sk);
-					tp->copied_seq = tp->rcv_nxt;
-				}
-				smp_mb();
-				tcp_set_state(sk, TCP_ESTABLISHED);
-				sk->sk_state_change(sk);
+	switch (sk->sk_state) {
+	case TCP_SYN_RECV:
+		if (acceptable) {
+			/* Once we leave TCP_SYN_RECV, we no longer
+			 * need req so release it.
+			 */
+			if (req) {
+				tcp_synack_rtt_meas(sk, req);
+				tp->total_retrans = req->num_retrans;
 
-				/* Note, that this wakeup is only for marginal
-				 * crossed SYN case. Passively open sockets
-				 * are not waked up, because sk->sk_sleep ==
-				 * NULL and sk->sk_socket == NULL.
+				reqsk_fastopen_remove(sk, req, false);
+			} else {
+				/* Make sure socket is routed, for
+				 * correct metrics.
 				 */
-				if (sk->sk_socket)
-					sk_wake_async(sk,
-						      SOCK_WAKE_IO, POLL_OUT);
-
-				tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
-				tp->snd_wnd = ntohs(th->window) <<
-					      tp->rx_opt.snd_wscale;
-				tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
-
-				if (tp->rx_opt.tstamp_ok)
-					tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
-
-				if (req) {
-					/* Re-arm the timer because data may
-					 * have been sent out. This is similar
-					 * to the regular data transmission case
-					 * when new data has just been ack'ed.
-					 *
-					 * (TFO) - we could try to be more
-					 * aggressive and retranmitting any data
-					 * sooner based on when they were sent
-					 * out.
-					 */
-					tcp_rearm_rto(sk);
-				} else
-					tcp_init_metrics(sk);
+				icsk->icsk_af_ops->rebuild_header(sk);
+				tcp_init_congestion_control(sk);
 
-				/* Prevent spurious tcp_cwnd_restart() on
-				 * first data packet.
+				tcp_mtup_init(sk);
+				tcp_init_buffer_space(sk);
+				tp->copied_seq = tp->rcv_nxt;
+			}
+			smp_mb();
+			tcp_set_state(sk, TCP_ESTABLISHED);
+			sk->sk_state_change(sk);
+
+			/* Note, that this wakeup is only for marginal
+			 * crossed SYN case. Passively open sockets
+			 * are not waked up, because sk->sk_sleep ==
+			 * NULL and sk->sk_socket == NULL.
+			 */
+			if (sk->sk_socket)
+				sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
+
+			tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
+			tp->snd_wnd = ntohs(th->window) <<
+				      tp->rx_opt.snd_wscale;
+			tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
+
+			if (tp->rx_opt.tstamp_ok)
+				tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
+
+			if (req) {
+				/* Re-arm the timer because data may
+				 * have been sent out. This is similar
+				 * to the regular data transmission case
+				 * when new data has just been ack'ed.
+				 *
+				 * (TFO) - we could try to be more aggressive
+				 * and retransmitting any data sooner based
+				 * on when they are sent out.
 				 */
-				tp->lsndtime = tcp_time_stamp;
+				tcp_rearm_rto(sk);
+			} else
+				tcp_init_metrics(sk);
 
-				tcp_initialize_rcv_mss(sk);
-				tcp_fast_path_on(tp);
-			} else {
-				return 1;
-			}
-			break;
+			/* Prevent spurious tcp_cwnd_restart() on
+			 * first data packet.
+			 */
+			tp->lsndtime = tcp_time_stamp;
 
-		case TCP_FIN_WAIT1:
-			/* If we enter the TCP_FIN_WAIT1 state and we are a
-			 * Fast Open socket and this is the first acceptable
-			 * ACK we have received, this would have acknowledged
-			 * our SYNACK so stop the SYNACK timer.
+			tcp_initialize_rcv_mss(sk);
+			tcp_fast_path_on(tp);
+		} else {
+			return 1;
+		}
+		break;
+
+	case TCP_FIN_WAIT1:
+		/* If we enter the TCP_FIN_WAIT1 state and we are a
+		 * Fast Open socket and this is the first acceptable
+		 * ACK we have received, this would have acknowledged
+		 * our SYNACK so stop the SYNACK timer.
+		 */
+		if (req != NULL) {
+			/* Return RST if ack_seq is invalid.
+			 * Note that RFC793 only says to generate a
+			 * DUPACK for it but for TCP Fast Open it seems
+			 * better to treat this case like TCP_SYN_RECV
+			 * above.
 			 */
-			if (req != NULL) {
-				/* Return RST if ack_seq is invalid.
-				 * Note that RFC793 only says to generate a
-				 * DUPACK for it but for TCP Fast Open it seems
-				 * better to treat this case like TCP_SYN_RECV
-				 * above.
-				 */
-				if (!acceptable)
+			if (!acceptable)
+				return 1;
+			/* We no longer need the request sock. */
+			reqsk_fastopen_remove(sk, req, false);
+			tcp_rearm_rto(sk);
+		}
+		if (tp->snd_una == tp->write_seq) {
+			struct dst_entry *dst;
+
+			tcp_set_state(sk, TCP_FIN_WAIT2);
+			sk->sk_shutdown |= SEND_SHUTDOWN;
+
+			dst = __sk_dst_get(sk);
+			if (dst)
+				dst_confirm(dst);
+
+			if (!sock_flag(sk, SOCK_DEAD)) {
+				/* Wake up lingering close() */
+				sk->sk_state_change(sk);
+			} else {
+				int tmo;
+
+				if (tp->linger2 < 0 ||
+				    (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
+				     after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
+					tcp_done(sk);
+					NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
 					return 1;
-				/* We no longer need the request sock. */
-				reqsk_fastopen_remove(sk, req, false);
-				tcp_rearm_rto(sk);
-			}
-			if (tp->snd_una == tp->write_seq) {
-				struct dst_entry *dst;
-
-				tcp_set_state(sk, TCP_FIN_WAIT2);
-				sk->sk_shutdown |= SEND_SHUTDOWN;
-
-				dst = __sk_dst_get(sk);
-				if (dst)
-					dst_confirm(dst);
-
-				if (!sock_flag(sk, SOCK_DEAD))
-					/* Wake up lingering close() */
-					sk->sk_state_change(sk);
-				else {
-					int tmo;
-
-					if (tp->linger2 < 0 ||
-					    (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
-					     after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
-						tcp_done(sk);
-						NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
-						return 1;
-					}
+				}
 
-					tmo = tcp_fin_time(sk);
-					if (tmo > TCP_TIMEWAIT_LEN) {
-						inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
-					} else if (th->fin || sock_owned_by_user(sk)) {
-						/* Bad case. We could lose such FIN otherwise.
-						 * It is not a big problem, but it looks confusing
-						 * and not so rare event. We still can lose it now,
-						 * if it spins in bh_lock_sock(), but it is really
-						 * marginal case.
-						 */
-						inet_csk_reset_keepalive_timer(sk, tmo);
-					} else {
-						tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
-						goto discard;
-					}
+				tmo = tcp_fin_time(sk);
+				if (tmo > TCP_TIMEWAIT_LEN) {
+					inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
+				} else if (th->fin || sock_owned_by_user(sk)) {
+					/* Bad case. We could lose such FIN otherwise.
+					 * It is not a big problem, but it looks confusing
+					 * and not so rare event. We still can lose it now,
+					 * if it spins in bh_lock_sock(), but it is really
+					 * marginal case.
+					 */
+					inet_csk_reset_keepalive_timer(sk, tmo);
+				} else {
+					tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
+					goto discard;
 				}
 			}
-			break;
+		}
+		break;
 
-		case TCP_CLOSING:
-			if (tp->snd_una == tp->write_seq) {
-				tcp_time_wait(sk, TCP_TIME_WAIT, 0);
-				goto discard;
-			}
-			break;
+	case TCP_CLOSING:
+		if (tp->snd_una == tp->write_seq) {
+			tcp_time_wait(sk, TCP_TIME_WAIT, 0);
+			goto discard;
+		}
+		break;
 
-		case TCP_LAST_ACK:
-			if (tp->snd_una == tp->write_seq) {
-				tcp_update_metrics(sk);
-				tcp_done(sk);
-				goto discard;
-			}
-			break;
-		}
+	case TCP_LAST_ACK:
+		if (tp->snd_una == tp->write_seq) {
+			tcp_update_metrics(sk);
+			tcp_done(sk);
+			goto discard;
 		}
+		break;
 	}
 
 	/* step 6: check the URG bit */