author     Joe Perches <joe@perches.com>            2013-05-24 14:36:13 -0400
committer  David S. Miller <davem@davemloft.net>    2013-05-26 02:22:18 -0400
commit     61eb900352ff731d990d5415ce9f04e4af6a6136 (patch)
tree       1e2e4e18bf6ad79e7c9b7c9ec59b736ec86468b6 /net/ipv4/tcp_input.c
parent     1f6afc81088a1f5a472b272408730c73b72c68aa (diff)
tcp: Remove another indentation level in tcp_rcv_state_process
case TCP_SYN_RECV: can have another indentation level removed
by converting

        if (acceptable) {
                ...;
        } else {
                return 1;
        }

to

        if (!acceptable)
                return 1;
        ...;

Reflow code and comments to fit 80 columns.

Another pure cleanup patch.

Signed-off-by: Joe Perches <joe@perches.com>
Improved-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
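For illustration only (not part of the commit): a minimal, self-contained sketch of
the same early-return conversion, using made-up names (handle_event_before,
handle_event_after, do_work). It shows why dropping the else branch removes one
indentation level from the main body.

        /* Hypothetical example of the pattern this patch applies. */
        static void do_work(void) { /* stands in for the real body */ }

        /* Before: the whole body is nested inside "if (ok)". */
        static int handle_event_before(int ok)
        {
                if (ok) {
                        do_work();      /* one level deeper than needed */
                } else {
                        return 1;
                }
                return 0;
        }

        /* After: reject early, then the body continues at the outer level. */
        static int handle_event_after(int ok)
        {
                if (!ok)
                        return 1;

                do_work();              /* one indentation level shallower */
                return 0;
        }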
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--   net/ipv4/tcp_input.c   110
1 file changed, 51 insertions(+), 59 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 40614257d2c5..413b480b9329 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5612,70 +5612,62 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 
         switch (sk->sk_state) {
         case TCP_SYN_RECV:
-                if (acceptable) {
-                        /* Once we leave TCP_SYN_RECV, we no longer
-                         * need req so release it.
-                         */
-                        if (req) {
-                                tcp_synack_rtt_meas(sk, req);
-                                tp->total_retrans = req->num_retrans;
+                if (!acceptable)
+                        return 1;
+
+                /* Once we leave TCP_SYN_RECV, we no longer need req
+                 * so release it.
+                 */
+                if (req) {
+                        tcp_synack_rtt_meas(sk, req);
+                        tp->total_retrans = req->num_retrans;
 
-                                reqsk_fastopen_remove(sk, req, false);
-                        } else {
-                                /* Make sure socket is routed, for
-                                 * correct metrics.
-                                 */
-                                icsk->icsk_af_ops->rebuild_header(sk);
-                                tcp_init_congestion_control(sk);
+                        reqsk_fastopen_remove(sk, req, false);
+                } else {
+                        /* Make sure socket is routed, for correct metrics. */
+                        icsk->icsk_af_ops->rebuild_header(sk);
+                        tcp_init_congestion_control(sk);
 
-                                tcp_mtup_init(sk);
-                                tcp_init_buffer_space(sk);
-                                tp->copied_seq = tp->rcv_nxt;
-                        }
-                        smp_mb();
-                        tcp_set_state(sk, TCP_ESTABLISHED);
-                        sk->sk_state_change(sk);
+                        tcp_mtup_init(sk);
+                        tcp_init_buffer_space(sk);
+                        tp->copied_seq = tp->rcv_nxt;
+                }
+                smp_mb();
+                tcp_set_state(sk, TCP_ESTABLISHED);
+                sk->sk_state_change(sk);
 
-                        /* Note, that this wakeup is only for marginal
-                         * crossed SYN case. Passively open sockets
-                         * are not waked up, because sk->sk_sleep ==
-                         * NULL and sk->sk_socket == NULL.
-                         */
-                        if (sk->sk_socket)
-                                sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
+                /* Note, that this wakeup is only for marginal crossed SYN case.
+                 * Passively open sockets are not waked up, because
+                 * sk->sk_sleep == NULL and sk->sk_socket == NULL.
+                 */
+                if (sk->sk_socket)
+                        sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
 
-                        tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
-                        tp->snd_wnd = ntohs(th->window) <<
-                                      tp->rx_opt.snd_wscale;
-                        tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
+                tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
+                tp->snd_wnd = ntohs(th->window) << tp->rx_opt.snd_wscale;
+                tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
 
-                        if (tp->rx_opt.tstamp_ok)
-                                tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
+                if (tp->rx_opt.tstamp_ok)
+                        tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
 
-                        if (req) {
-                                /* Re-arm the timer because data may
-                                 * have been sent out. This is similar
-                                 * to the regular data transmission case
-                                 * when new data has just been ack'ed.
-                                 *
-                                 * (TFO) - we could try to be more aggressive
-                                 * and retransmitting any data sooner based
-                                 * on when they are sent out.
-                                 */
-                                tcp_rearm_rto(sk);
-                        } else
-                                tcp_init_metrics(sk);
+                if (req) {
+                        /* Re-arm the timer because data may have been sent out.
+                         * This is similar to the regular data transmission case
+                         * when new data has just been ack'ed.
+                         *
+                         * (TFO) - we could try to be more aggressive and
+                         * retransmitting any data sooner based on when they
+                         * are sent out.
+                         */
+                        tcp_rearm_rto(sk);
+                } else
+                        tcp_init_metrics(sk);
 
-                        /* Prevent spurious tcp_cwnd_restart() on
-                         * first data packet.
-                         */
-                        tp->lsndtime = tcp_time_stamp;
+                /* Prevent spurious tcp_cwnd_restart() on first data packet */
+                tp->lsndtime = tcp_time_stamp;
 
-                        tcp_initialize_rcv_mss(sk);
-                        tcp_fast_path_on(tp);
-                } else {
-                        return 1;
-                }
+                tcp_initialize_rcv_mss(sk);
+                tcp_fast_path_on(tp);
                 break;
 
         case TCP_FIN_WAIT1: